source
stringlengths
3
92
c
stringlengths
26
2.25M
com1.c
/*
 * com1.c -- model component #1.
 *
 * Allocates three 3-D grid variables (gv_temp on cells, gv_grad on edges,
 * gv_dvg on cells), registers them with the I/O layer, and provides the
 * compute / flops / memory / checksum / cleanup hooks exported through the
 * MODEL_COMPONENT table `com1`.
 */
#include <mpi.h>
extern int local_cell_blocks;   /* optional override: blocks per rank (cells) */
extern int local_edge_blocks;   /* optional override: blocks per rank (edges) */
#include "grid.h"
#include "memory.h"
#include "component.h"
#include "io.h"
#include <stdint.h>
#include <stdlib.h>             /* malloc, free */

void com1_init(GRID * g);
void com1_compute(GRID * g);
void com1_io(GRID * g);
double com1_flops(GRID * g);
double com1_memory(GRID * g);
uint64_t com1_checksum(GRID *);
void com1_cleanup(GRID * g);

/* Kernels defined elsewhere in the project. */
void grad(GRID * g);
void dvg(GRID * g);
void step(GRID * g);

/*
 * Descriptor for one grid variable: name, mesh location (0 = cell,
 * 1 = edge), rank, and the (block, level, index) data pointer.  All three
 * variables in this component use the 3-D member of the union.
 * Previously this struct was spelled out three times anonymously.
 */
typedef struct {
    char *name;
    int loc;
    int dim;
    union {
        GVAL *restrict * restrict p2;
        GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} com1_var_t;

com1_var_t *gv_temp;    /* cell-located temperature-like field */
com1_var_t *gv_grad;    /* edge-located gradient field */
com1_var_t *gv_dvg;     /* cell-located divergence field */

static io_var_t io_gv_temp;
static io_var_t io_gv_grad;
static io_var_t io_gv_dvg;

MODEL_COMPONENT com1 = {
    0,
    com1_init,
    com1_compute,
    com1_io,
    com1_flops,
    com1_memory,
    com1_checksum,
    com1_cleanup
};

/*
 * Number of blocks handled by this rank: an explicit override if nonzero,
 * otherwise the ceiling of the global block count over the world size.
 */
static int com1_num_blocks(int override_blocks, int total_blocks, const GRID * g)
{
    if (override_blocks)
        return override_blocks;
    return (total_blocks + g->mpi_world_size - 1) / g->mpi_world_size;
}

/*
 * Allocate and zero-fill one 3-D grid variable with `num_blocks` blocks of
 * g->height levels of g->blkSize values.  A single flat allocation holds,
 * in order: the block-pointer table, the per-block level-pointer tables,
 * and the data itself; freeing data_pointer.p3 releases all of it.
 */
static com1_var_t *com1_var_create(char *name, int loc, GRID * g, int num_blocks)
{
    com1_var_t *v = malloc(sizeof *v);  /* was a magic malloc(24) */
    v->name = name;
    v->loc = loc;
    v->dim = 3;
    v->data_pointer.p3 = malloc((size_t) num_blocks * g->height * g->blkSize * sizeof(GVAL)
                                + (size_t) num_blocks * g->height * sizeof(char *)
                                + (size_t) num_blocks * sizeof(char *));
    char *lvl = (char *) v->data_pointer.p3 + num_blocks * sizeof(char *);
    char *dat = (char *) v->data_pointer.p3 + num_blocks * sizeof(char *)
        + num_blocks * g->height * sizeof(char *);
    for (int b = 0; b < num_blocks; b++) {
        v->data_pointer.p3[b] = (GVAL * *) lvl;
        lvl += g->height * sizeof(char *);
        for (int k = 0; k < g->height; k++) {
            v->data_pointer.p3[b][k] = (GVAL *) dat;
            dat += g->blkSize * sizeof(GVAL);
            for (int i = 0; i < g->blkSize; i++)
                v->data_pointer.p3[b][k][i] = (GVAL) 0;
        }
    }
    return v;
}

void com1_init(GRID * g)
{
    com1.loaded = 1;

    int cell_blocks = com1_num_blocks(local_cell_blocks, g->cBlkCnt, g);
    int edge_blocks = com1_num_blocks(local_edge_blocks, g->eBlkCnt, g);

    gv_temp = com1_var_create("gv_temp", 0, g, cell_blocks);
    gv_grad = com1_var_create("gv_grad", 1, g, edge_blocks);
    gv_dvg = com1_var_create("gv_dvg", 0, g, cell_blocks);

    /* NOTE(review): the io_* calls receive the variable descriptor cast to
     * GVAL *; presumably the I/O layer expects the descriptor rather than
     * the raw data array -- confirm against io.h. */
    io_read_register(g, "gv_temp", (GVAL *) gv_temp, FLOAT32, FLOAT32, GRID_POS_CELL, GRID_DIM_3D);
    io_write_define(g, "gv_temp", (GVAL *) gv_temp, FLOAT32, GRID_POS_CELL, GRID_DIM_3D, &io_gv_temp);
    io_write_define(g, "gv_grad", (GVAL *) gv_grad, FLOAT32, GRID_POS_EDGE, GRID_DIM_3D, &io_gv_grad);
    io_write_define(g, "gv_dvg", (GVAL *) gv_dvg, FLOAT32, GRID_POS_CELL, GRID_DIM_3D, &io_gv_dvg);
}

/* One timestep: gradient, divergence, then the update kernel. */
void com1_compute(GRID * g)
{
    grad(g);
    dvg(g);
    step(g);
}

/* Announce the two derived fields for output (gv_temp is read, not written). */
void com1_io(GRID * g)
{
    io_write_announce(g, &io_gv_grad);
    io_write_announce(g, &io_gv_dvg);
}

/* Floating-point operation estimate for one com1_compute() call. */
double com1_flops(GRID * g)
{
    double flop = (double) g->edgeCount * (double) g->height
        + 2.0 * (double) NBRS * (double) g->cellCount * (double) g->height
        + 2.0 * (double) g->cellCount * (double) g->height;
    return flop;
}

/* Data footprint of the three fields, in MiB. */
double com1_memory(GRID * g)
{
    double mem = ((double) g->edgeCount * (double) g->height
                  + 2.0 * (double) g->cellCount * (double) g->height) * sizeof(GVAL);
    return mem / (1024 * 1024);
}

/*
 * Rank-local [min, max) block range for a contiguous partition of blk_cnt
 * global blocks over the communicator.  Block indices are rank-local, so
 * the range always starts at 0; ranks past the last owning rank get an
 * empty range, and the last owning rank may get a short remainder.
 * (The original inline ternary chains reduced to exactly this.)
 */
static void com1_block_range(const GRID * g, int blk_cnt, size_t * min_block, size_t * max_block)
{
    int per_rank = (blk_cnt + g->mpi_world_size - 1) / g->mpi_world_size;
    int last_rank = (blk_cnt - 1) / per_rank;

    *min_block = 0;
    if (g->mpi_rank > last_rank) {
        *max_block = 0;         /* this rank owns no blocks */
    } else if (g->mpi_rank == last_rank) {
        int rem = blk_cnt % per_rank;
        *max_block = rem ? rem : per_rank;      /* possibly short final rank */
    } else {
        *max_block = per_rank;
    }
}

/*
 * Integer checksum of all three fields on this rank.  The OpenMP loops now
 * carry reduction(+:ret); the original accumulated into the shared `ret`
 * without a reduction, which is a data race.
 */
uint64_t com1_checksum(GRID * g)
{
    uint64_t ret = 0;
    size_t min_block, max_block;

    /* Cell-located variables (gv_temp, gv_dvg). */
    com1_block_range(g, g->cBlkCnt, &min_block, &max_block);
#pragma omp parallel for reduction(+:ret)
    for (size_t block_index = min_block; block_index < max_block; block_index++) {
        for (size_t height_index = 0; height_index < g->height; height_index++) {
            for (size_t cell_index = 0; cell_index < g->blkSize; cell_index++) {
                ret += (uint64_t) gv_temp->data_pointer.p3[block_index][height_index][cell_index];
                ret += (uint64_t) gv_dvg->data_pointer.p3[block_index][height_index][cell_index];
            }
        }
    }

    /* Edge-located variable (gv_grad). */
    com1_block_range(g, g->eBlkCnt, &min_block, &max_block);
#pragma omp parallel for reduction(+:ret)
    for (size_t block_index = min_block; block_index < max_block; block_index++) {
        for (size_t height_index = 0; height_index < g->height; height_index++) {
            for (size_t edge_index = 0; edge_index < g->blkSize; edge_index++) {
                ret += (uint64_t) gv_grad->data_pointer.p3[block_index][height_index][edge_index];
            }
        }
    }

    return ret;
}

void com1_cleanup(GRID * g)
{
    com1.loaded = 0;
    free((void *) gv_temp->data_pointer.p3);
    free((void *) gv_grad->data_pointer.p3);
    free((void *) gv_dvg->data_pointer.p3);
    /* The descriptors themselves were previously leaked; free them too. */
    free(gv_temp);
    free(gv_grad);
    free(gv_dvg);
    gv_temp = gv_grad = gv_dvg = NULL;
}
GB_unaryop__minv_bool_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_int8 // op(A') function: GB_tran__minv_bool_int8 // C type: bool // A type: int8_t // cast: ; // unaryop: cij = true #define GB_ATYPE \ int8_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_int8 ( bool *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_alloc_hbw.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>

// Exercises omp_alloc()/omp_free() against a high-bandwidth-memory allocator
// configured with a 2 MiB pool and a NULL fallback.  Two threads each request
// 1 MiB; because the allocator itself has some small memory overhead, exactly
// one of the two requests is expected to fail when the allocator was created.
int main() {
  omp_alloctrait_t traits[2];
  void *ptrs[2];

  traits[0].key = omp_atk_pool_size;
  traits[0].value = 2 * 1024 * 1024;
  traits[1].key = omp_atk_fallback;
  traits[1].value = omp_atv_null_fb;

  omp_allocator_handle_t hbw = omp_init_allocator(omp_high_bw_mem_space, 2, traits);
  printf("allocator hbw created: %p\n", (void *)hbw);

#pragma omp parallel num_threads(2)
  {
    int tid = omp_get_thread_num();
    ptrs[tid] = omp_alloc(1024 * 1024, hbw);
#pragma omp barrier
    printf("th %d, ptr %p\n", tid, ptrs[tid]);
    omp_free(ptrs[tid], hbw);
  }

  if (hbw == omp_null_allocator) {
    // A NULL allocator handle means the requests fell back to default
    // allocations, so both pointers should be valid.
    if (ptrs[0] != NULL && ptrs[1] != NULL) {
      printf("passed\n");
      return 0;
    }
    printf("failed: pointers %p %p\n", ptrs[0], ptrs[1]);
    return 1;
  }

  // Real allocator: the pool plus overhead fits only one 1 MiB block, and the
  // requested NULL fallback turns the other request into a NULL pointer, so
  // exactly one of the two pointers must be NULL.
  if ((ptrs[0] == NULL) != (ptrs[1] == NULL)) {
    printf("passed\n");
    return 0;
  }
  printf("failed: pointers %p %p\n", ptrs[0], ptrs[1]);
  return 1;
}
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/draw.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/magick.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/option.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resize-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. */ struct _ResizeFilter { MagickRealType (*filter)(const MagickRealType,const ResizeFilter *), (*window)(const MagickRealType,const ResizeFilter *), support, /* filter region of support - the filter support limit */ window_support, /* window support, usally equal to support (expert only) */ scale, /* dimension scaling to fit window support (usally 1.0) */ blur, /* x-scale (blur-sharpen) */ coefficient[7]; /* cubic coefficents for BC-cubic filters */ size_t signature; }; /* Forward declaractions. 
*/ static MagickRealType I0(MagickRealType x), BesselOrderOne(MagickRealType), Sinc(const MagickRealType, const ResizeFilter *), SincFast(const MagickRealType, const ResizeFilter *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F i l t e r F u n c t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % These are the various filter and windowing functions that are provided. % % They are internal to this module only. See AcquireResizeFilterInfo() for % details of the access to these functions, via the GetResizeFilterSupport() % and GetResizeFilterWeight() API interface. % % The individual filter functions have this format... % % static MagickRealtype *FilterName(const MagickRealType x, % const MagickRealType support) % % A description of each parameter follows: % % o x: the distance from the sampling point generally in the range of 0 to % support. The GetResizeFilterWeight() ensures this a positive value. % % o resize_filter: current filter information. This allows function to % access support, and possibly other pre-calculated information defining % the functions. % */ static MagickRealType Blackman(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* Blackman: 2nd order cosine windowing function: 0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x) Refactored by Chantal Racette and Nicolas Robidoux to one trig call and five flops. */ const MagickRealType cosine=cos((double) (MagickPI*x)); return(0.34+cosine*(0.5+cosine*0.16)); } static MagickRealType Bohman(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* Bohman: 2rd Order cosine windowing function: (1-x) cos(pi x) + sin(pi x) / pi. Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7 flops, taking advantage of the fact that the support of Bohman is 1.0 (so that we know that sin(pi x) >= 0). 
*/ const double cosine=cos((double) (MagickPI*x)); const double sine=sqrt(1.0-cosine*cosine); return((MagickRealType) ((1.0-x)*cosine+(1.0/MagickPI)*sine)); } static MagickRealType Box(const MagickRealType magick_unused(x), const ResizeFilter *magick_unused(resize_filter)) { /* A Box filter is a equal weighting function (all weights equal). DO NOT LIMIT results by support or resize point sampling will work as it requests points beyond its normal 0.0 support size. */ return(1.0); } static MagickRealType Cosine(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* Cosine window function: cos((pi/2)*x). */ return((MagickRealType)cos((double) (MagickPI2*x))); } static MagickRealType CubicBC(const MagickRealType x, const ResizeFilter *resize_filter) { /* Cubic Filters using B,C determined values: Mitchell-Netravali B = 1/3 C = 1/3 "Balanced" cubic spline filter Catmull-Rom B = 0 C = 1/2 Interpolatory and exact on linears Spline B = 1 C = 0 B-Spline Gaussian approximation Hermite B = 0 C = 0 B-Spline interpolator See paper by Mitchell and Netravali, Reconstruction Filters in Computer Graphics Computer Graphics, Volume 22, Number 4, August 1988 http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/ Mitchell.pdf. Coefficents are determined from B,C values: P0 = ( 6 - 2*B )/6 = coeff[0] P1 = 0 P2 = (-18 +12*B + 6*C )/6 = coeff[1] P3 = ( 12 - 9*B - 6*C )/6 = coeff[2] Q0 = ( 8*B +24*C )/6 = coeff[3] Q1 = ( -12*B -48*C )/6 = coeff[4] Q2 = ( 6*B +30*C )/6 = coeff[5] Q3 = ( - 1*B - 6*C )/6 = coeff[6] which are used to define the filter: P0 + P1*x + P2*x^2 + P3*x^3 0 <= x < 1 Q0 + Q1*x + Q2*x^2 + Q3*x^3 1 <= x < 2 which ensures function is continuous in value and derivative (slope). 
*/ if (x < 1.0) return(resize_filter->coefficient[0]+x*(x* (resize_filter->coefficient[1]+x*resize_filter->coefficient[2]))); if (x < 2.0) return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x* (resize_filter->coefficient[5]+x*resize_filter->coefficient[6]))); return(0.0); } static MagickRealType Gaussian(const MagickRealType x, const ResizeFilter *resize_filter) { /* Gaussian with a sigma = 1/2 (or as user specified) Gaussian Formula (1D) ... exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2)) Gaussian Formula (2D) ... exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) ) or for radius exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) ) Note that it is only a change from 1-d to radial form is in the normalization multiplier which is not needed or used when Gaussian is used as a filter. The constants are pre-calculated... coeff[0]=sigma; coeff[1]=1.0/(2.0*sigma^2); coeff[2]=1.0/(sqrt(2*PI)*sigma^2); exp( -coeff[1]*(x^2)) ) * coeff[2]; However the multiplier coeff[1] is need, the others are informative only. This separates the gaussian 'sigma' value from the 'blur/support' settings allowing for its use in special 'small sigma' gaussians, without the filter 'missing' pixels because the support becomes too small. */ return(exp((double)(-resize_filter->coefficient[1]*x*x))); } static MagickRealType Hanning(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* Cosine window function: 0.5+0.5*cos(pi*x). */ const MagickRealType cosine=cos((double) (MagickPI*x)); return(0.5+0.5*cosine); } static MagickRealType Hamming(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* Offset cosine window function: .54 + .46 cos(pi x). */ const MagickRealType cosine=cos((double) (MagickPI*x)); return(0.54+0.46*cosine); } static MagickRealType Jinc(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions. 
http://mathworld.wolfram.com/JincFunction.html and page 11 of http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf The original "zoom" program by Paul Heckbert called this "Bessel". But really it is more accurately named "Jinc". */ if (x == 0.0) return((MagickRealType) (0.5*MagickPI)); return(BesselOrderOne((MagickRealType) MagickPI*x)/x); } static MagickRealType Kaiser(const MagickRealType x, const ResizeFilter *resize_filter) { /* Kaiser Windowing Function (bessel windowing) I0( beta * sqrt( 1-x^2) ) / IO(0) Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5). However it is typically defined in terms of Alpha*PI The normalization factor (coeff[1]) is not actually needed, but without it the filters has a large value at x=0 making it difficult to compare the function with other windowing functions. */ return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]* sqrt((double) (1.0-x*x)))); } static MagickRealType Lagrange(const MagickRealType x, const ResizeFilter *resize_filter) { MagickRealType value; register ssize_t i; ssize_t n, order; /* Lagrange piecewise polynomial fit of sinc: N is the 'order' of the lagrange function and depends on the overall support window size of the filter. That is: for a support of 2, it gives a lagrange-4 (piecewise cubic function). "n" identifies the piece of the piecewise polynomial. See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging, Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064. */ if (x > resize_filter->support) return(0.0); order=(ssize_t) (2.0*resize_filter->window_support); /* number of pieces */ n=(ssize_t) (resize_filter->window_support+x); value=1.0f; for (i=0; i < order; i++) if (i != n) value*=(n-i-x)/(n-i); return(value); } static MagickRealType Quadratic(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* 2rd order (quadratic) B-Spline approximation of Gaussian. 
*/ if (x < 0.5) return(0.75-x*x); if (x < 1.5) return(0.5*(x-1.5)*(x-1.5)); return(0.0); } static MagickRealType Sinc(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* Scaled sinc(x) function using a trig call: sinc(x) == sin(pi x)/(pi x). */ if (x != 0.0) { const MagickRealType alpha=(MagickRealType) (MagickPI*x); return(sin((double) alpha)/alpha); } return((MagickRealType) 1.0); } static MagickRealType SincFast(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* Approximations of the sinc function sin(pi x)/(pi x) over the interval [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding from the Natural Sciences and Engineering Research Council of Canada. Although the approximations are polynomials (for low order of approximation) and quotients of polynomials (for higher order of approximation) and consequently are similar in form to Taylor polynomials / Pade approximants, the approximations are computed with a completely different technique. Summary: These approximations are "the best" in terms of bang (accuracy) for the buck (flops). More specifically: Among the polynomial quotients that can be computed using a fixed number of flops (with a given "+ - * / budget"), the chosen polynomial quotient is the one closest to the approximated function with respect to maximum absolute relative error over the given interval. The Remez algorithm, as implemented in the boost library's minimax package, is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/ math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html If outside of the interval of approximation, use the standard trig formula. */ if (x > 4.0) { const MagickRealType alpha=(MagickRealType) (MagickPI*x); return(sin((double) alpha)/alpha); } { /* The approximations only depend on x^2 (sinc is an even function). 
*/ const MagickRealType xx = x*x; #if MAGICKCORE_QUANTUM_DEPTH <= 8 /* Maximum absolute relative error 6.3e-6 < 1/2^17. */ const double c0 = 0.173610016489197553621906385078711564924e-2L; const double c1 = -0.384186115075660162081071290162149315834e-3L; const double c2 = 0.393684603287860108352720146121813443561e-4L; const double c3 = -0.248947210682259168029030370205389323899e-5L; const double c4 = 0.107791837839662283066379987646635416692e-6L; const double c5 = -0.324874073895735800961260474028013982211e-8L; const double c6 = 0.628155216606695311524920882748052490116e-10L; const double c7 = -0.586110644039348333520104379959307242711e-12L; const double p = c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7)))))); return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p); #elif MAGICKCORE_QUANTUM_DEPTH <= 16 /* Max. abs. rel. error 2.2e-8 < 1/2^25. */ const double c0 = 0.173611107357320220183368594093166520811e-2L; const double c1 = -0.384240921114946632192116762889211361285e-3L; const double c2 = 0.394201182359318128221229891724947048771e-4L; const double c3 = -0.250963301609117217660068889165550534856e-5L; const double c4 = 0.111902032818095784414237782071368805120e-6L; const double c5 = -0.372895101408779549368465614321137048875e-8L; const double c6 = 0.957694196677572570319816780188718518330e-10L; const double c7 = -0.187208577776590710853865174371617338991e-11L; const double c8 = 0.253524321426864752676094495396308636823e-13L; const double c9 = -0.177084805010701112639035485248501049364e-15L; const double p = c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9)))))))); return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p); #else /* Max. abs. rel. error 1.2e-12 < 1/2^39. 
*/ const double c0 = 0.173611111110910715186413700076827593074e-2L; const double c1 = -0.289105544717893415815859968653611245425e-3L; const double c2 = 0.206952161241815727624413291940849294025e-4L; const double c3 = -0.834446180169727178193268528095341741698e-6L; const double c4 = 0.207010104171026718629622453275917944941e-7L; const double c5 = -0.319724784938507108101517564300855542655e-9L; const double c6 = 0.288101675249103266147006509214934493930e-11L; const double c7 = -0.118218971804934245819960233886876537953e-13L; const double p = c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7)))))); const double d0 = 1.0L; const double d1 = 0.547981619622284827495856984100563583948e-1L; const double d2 = 0.134226268835357312626304688047086921806e-2L; const double d3 = 0.178994697503371051002463656833597608689e-4L; const double d4 = 0.114633394140438168641246022557689759090e-6L; const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4))); return((MagickRealType) ((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p)); #endif } } static MagickRealType Triangle(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* 1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or a Bartlett 2D Cone filter. Also used as a Bartlett Windowing function for Sinc(). */ if (x < 1.0) return(1.0-x); return(0.0); } static MagickRealType Welsh(const MagickRealType x, const ResizeFilter *magick_unused(resize_filter)) { /* Welsh parabolic windowing filter. */ if (x < 1.0) return(1.0-x*x); return(0.0); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e R e s i z e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireResizeFilter() allocates the ResizeFilter structure. 
Choose from % these filters: % % FIR (Finite impulse Response) Filters % Box Triangle Quadratic % Spline Hermite Catrom % Mitchell % % IIR (Infinite impulse Response) Filters % Gaussian Sinc Jinc (Bessel) % % Windowed Sinc/Jinc Filters % Blackman Bohman Lanczos % Hann Hamming Cosine % Kaiser Welch Parzen % Bartlett % % Special Purpose Filters % Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp % Robidoux RobidouxSharp % % The users "-filter" selection is used to lookup the default 'expert' % settings for that filter from a internal table. However any provided % 'expert' settings (see below) may override this selection. % % FIR filters are used as is, and are limited to that filters support window % (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also % simply clipped by its support size (currently 1.5 or approximately 3*sigma % as recommended by many references) % % The special a 'cylindrical' filter flag will promote the default 4-lobed % Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better % suited to this style of image resampling. This typically happens when using % such a filter for images distortions. % % SPECIFIC FILTERS: % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). 
% The Sinc version is the most popular windowed filter. % % LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of % the Lanczos filter, specifically designed for EWA distortion (as a % Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos % (Sinc-Sinc) filter. The chosen blur value comes as close as possible to % satisfying the following condition without changing the character of the % corresponding EWA filter: % % 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with % only vertical or horizontal features are preserved when performing 'no-op" % with EWA distortion. % % The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos % filters. The 'sharp' version uses a blur factor of 0.9549963639785485, % again chosen because the resulting EWA filter comes as close as possible to % satisfying the above condition. % % Robidoux is another filter tuned for EWA. It is the Keys cubic filter % defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op' % Vertical and Horizontal Line Preservation Condition" exactly, and it % moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns % out to be close to both Mitchell and Lanczos2Sharp. For example, its first % crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the % first crossing of Mitchell and Lanczos2Sharp. % % RodidouxSharp is a slightly sharper version of Rodidoux, some believe it % is too sharp. It is designed to minimize the maximum possible change in % a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op % conditions. Amazingly Mitchell falls roughly between Rodidoux and % RodidouxSharp, though this seems to have been pure coincidence. % % 'EXPERT' OPTIONS: % % These artifact "defines" are not recommended for production use without % expert knowledge of resampling, filtering, and the effects they have on the % resulting resampled (resized or distorted) image. 
% % They can be used to override any and all filter default, and it is % recommended you make good use of "filter:verbose" to make sure that the % overall effect of your selection (before and after) is as expected. % % "filter:verbose" controls whether to output the exact results of the % filter selections made, as well as plotting data for graphing the % resulting filter over the filters support range. % % "filter:filter" select the main function associated with this filter % name, as the weighting function of the filter. This can be used to % set a windowing function as a weighting function, for special % purposes, such as graphing. % % If a "filter:window" operation has not been provided, a 'Box' % windowing function will be set to denote that no windowing function is % being used. % % "filter:window" Select this windowing function for the filter. While any % filter could be used as a windowing function, using the 'first lobe' of % that filter over the whole support window, using a non-windowing % function is not advisible. If no weighting filter function is specified % a 'SincFast' filter is used. % % "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a % simpler method of setting filter support size that will correctly % handle the Sinc/Jinc switch for an operators filtering requirements. % Only integers should be given. % % "filter:support" Set the support size for filtering to the size given. % This not recommended for Sinc/Jinc windowed filters (lobes should be % used instead). This will override any 'filter:lobes' option. % % "filter:win-support" Scale windowing function to this size instead. This % causes the windowing (or self-windowing Lagrange filter) to act is if % the support window it much much larger than what is actually supplied % to the calling operator. The filter however is still clipped to the % real support size given, by the support range supplied to the caller. % If unset this will equal the normal filter support size. 
%
%  "filter:blur" Scale the filter and support window by this amount.  A value
%  of > 1 will generally result in a more blurred image with more ringing
%  effects, while a value <1 will sharpen the resulting image with more
%  aliasing effects.
%
%  "filter:sigma" The sigma value to use for the Gaussian filter only.
%  Defaults to '1/2'.  Using a different sigma effectively provides a
%  method of using the filter as a 'blur' convolution.  Particularly when
%  using it for Distort.
%
%  "filter:b"
%  "filter:c" Override the preset B,C values for a Cubic filter.
%  If only one of these is given it is assumed to be a 'Keys' type of
%  filter such that B+2C=1, where Keys 'alpha' value = C.
%
%  Examples:
%
%  Set a true un-windowed Sinc filter with 8 lobes (very slow):
%     -define filter:filter=Sinc
%     -define filter:lobes=8
%
%  Set an 8 lobe Lanczos (Sinc or Jinc) filter:
%     -filter Lanczos
%     -define filter:lobes=8
%
%  The format of the AcquireResizeFilter method is:
%
%      ResizeFilter *AcquireResizeFilter(const Image *image,
%        const FilterTypes filter,const MagickRealType blur,
%        const MagickBooleanType cylindrical,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filter: the filter type, defining a preset filter, window and support.
%      The artifact settings listed above will override those selections.
%
%    o blur: blur the filter by this amount, use 1.0 if unknown.  Image
%      artifact "filter:blur" will override this API call usage, including any
%      internal change (such as for cylindrical usage).
%
%    o cylindrical: use a 1D orthogonal filter (Sinc) or 2D cylindrical
%      (radial) filter (Jinc).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterTypes filter,const MagickRealType blur,
  const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterTypes
    filter_type,
    window_type;

  MagickRealType
    B,
    C,
    value;

  register ResizeFilter
    *resize_filter;

  /*
    Table Mapping given Filter, into Weighting and Windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
    A 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
    "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
    specifically requested by the user.

    WARNING: The order of this table must match the order of the FilterTypes
    enumeration specified in "resample.h", or the filter names will not match
    the filter being setup.

    You can check filter setups with the "filter:verbose" expert setting.
  */
  static struct
  {
    FilterTypes
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter,     BoxFilter },      /* Undefined (default to Box)   */
    { PointFilter,         BoxFilter },      /* SPECIAL: Nearest neighbour   */
    { BoxFilter,           BoxFilter },      /* Box averaging filter         */
    { TriangleFilter,      BoxFilter },      /* Linear interpolation filter  */
    { HermiteFilter,       BoxFilter },      /* Hermite interpolation filter */
    { SincFastFilter,      HanningFilter },  /* Hanning -- cosine-sinc       */
    { SincFastFilter,      HammingFilter },  /* Hamming --   '' variation    */
    { SincFastFilter,      BlackmanFilter }, /* Blackman -- 2*cosine-sinc    */
    { GaussianFilter,      BoxFilter },      /* Gaussian blur filter         */
    { QuadraticFilter,     BoxFilter },      /* Quadratic Gaussian approx    */
    { CubicFilter,         BoxFilter },      /* General Cubic Filter, Spline */
    { CatromFilter,        BoxFilter },      /* Cubic-Keys interpolator      */
    { MitchellFilter,      BoxFilter },      /* 'Ideal' Cubic-Keys filter    */
    { JincFilter,          BoxFilter },      /* Raw 3-lobed Jinc function    */
    { SincFilter,          BoxFilter },      /* Raw 4-lobed Sinc function    */
    { SincFastFilter,      BoxFilter },      /* Raw fast sinc ("Pade"-type)  */
    { SincFastFilter,      KaiserFilter },   /* Kaiser -- square root-sinc   */
    { LanczosFilter,       WelshFilter },    /* Welch -- parabolic (3 lobe)  */
    { SincFastFilter,      CubicFilter },    /* Parzen -- cubic-sinc         */
    { SincFastFilter,      BohmanFilter },   /* Bohman -- 2*cosine-sinc      */
    { SincFastFilter,      TriangleFilter }, /* Bartlett -- triangle-sinc    */
    { LagrangeFilter,      BoxFilter },      /* Lagrange self-windowing      */
    { LanczosFilter,       LanczosFilter },  /* Lanczos Sinc-Sinc filters    */
    { LanczosSharpFilter,  LanczosSharpFilter }, /* | these require          */
    { Lanczos2Filter,      Lanczos2Filter },     /* | special handling       */
    { Lanczos2SharpFilter, Lanczos2SharpFilter },
    { RobidouxFilter,      BoxFilter },      /* Cubic Keys tuned for EWA     */
    { RobidouxSharpFilter, BoxFilter },      /* Sharper Cubic Keys for EWA   */
    { LanczosFilter,       CosineFilter },   /* Cosine window (3 lobes)      */
    { SplineFilter,        BoxFilter },      /* Spline Cubic Filter          */
    { LanczosRadiusFilter, LanczosFilter },  /* Lanczos with integer radius  */
  };
  /*
    Table mapping the filter/window from the above table to an actual function.

    The default support size for that filter as a weighting function, the
    range to scale with to use that function as a sinc windowing function,
    (typ 1.0).

    Note that the filter_type -> function is 1 to 1 except for Sinc(),
    SincFast(), and CubicBC() functions, which may have multiple filter to
    function associations.

    See "filter:verbose" handling below for the function -> filter mapping.
  */
  static struct
  {
    MagickRealType
      (*function)(const MagickRealType,const ResizeFilter*);

    double
      support,  /* Default lobes/support size of the weighting filter. */
      scale,    /* Support when function used as a windowing function
                   Typically equal to the location of the first zero crossing. */
      B,C;      /* BC-spline coefficients, ignored if not a CubicBC filter. */
  } const filters[SentinelFilter] =
  {
    /*            .--- support window (if used as a Weighting Function)
                  |    .--- first crossing (if used as a Windowing Function)
                  |    |    .--- B value for Cubic Function
                  |    |    |    .---- C value for Cubic Function
                  |    |    |    |                                        */
    { Box,       0.5, 0.5, 0.0, 0.0 },      /* Undefined (default to Box)  */
    { Box,       0.0, 0.5, 0.0, 0.0 },      /* Point (special handling)    */
    { Box,       0.5, 0.5, 0.0, 0.0 },      /* Box                         */
    { Triangle,  1.0, 1.0, 0.0, 0.0 },      /* Triangle                    */
    { CubicBC,   1.0, 1.0, 0.0, 0.0 },      /* Hermite (cubic  B=C=0)      */
    { Hanning,   1.0, 1.0, 0.0, 0.0 },      /* Hann, cosine window         */
    { Hamming,   1.0, 1.0, 0.0, 0.0 },      /* Hamming, '' variation       */
    { Blackman,  1.0, 1.0, 0.0, 0.0 },      /* Blackman, 2*cosine window   */
    { Gaussian,  2.0, 1.5, 0.0, 0.0 },      /* Gaussian                    */
    { Quadratic, 1.5, 1.5, 0.0, 0.0 },      /* Quadratic gaussian          */
    { CubicBC,   2.0, 2.0, 1.0, 0.0 },      /* General Cubic Filter        */
    { CubicBC,   2.0, 1.0, 0.0, 0.5 },      /* Catmull-Rom    (B=0,C=1/2)  */
    { CubicBC,   2.0, 8.0/7.0, 1./3., 1./3. }, /* Mitchell   (B=C=1/3)     */
    { Jinc,      3.0, 1.2196698912665045, 0.0, 0.0 }, /* Raw 3-lobed Jinc  */
    { Sinc,      4.0, 1.0, 0.0, 0.0 },      /* Raw 4-lobed Sinc            */
    { SincFast,  4.0, 1.0, 0.0, 0.0 },      /* Raw fast sinc ("Pade"-type) */
    { Kaiser,    1.0, 1.0, 0.0, 0.0 },      /* Kaiser (square root window) */
    { Welsh,     1.0, 1.0, 0.0, 0.0 },      /* Welsh (parabolic window)    */
    { CubicBC,   2.0, 2.0, 1.0, 0.0 },      /* Parzen (B-Spline window)    */
    { Bohman,    1.0, 1.0, 0.0, 0.0 },      /* Bohman, 2*Cosine window     */
    { Triangle,  1.0, 1.0, 0.0, 0.0 },      /* Bartlett (triangle window)  */
    { Lagrange,  2.0, 1.0, 0.0, 0.0 },      /* Lagrange sinc approximation */
    { SincFast,  3.0, 1.0, 0.0, 0.0 },      /* Lanczos, 3-lobed Sinc-Sinc  */
    { SincFast,  3.0, 1.0, 0.0, 0.0 },      /* Lanczos, Sharpened          */
    { SincFast,  2.0, 1.0, 0.0, 0.0 },      /* Lanczos, 2-lobed            */
    { SincFast,  2.0, 1.0, 0.0, 0.0 },      /* Lanczos2, sharpened         */
    /* Robidoux: Keys cubic close to Lanczos2D sharpened */
    { CubicBC,   2.0, 1.1685777620836932,
                      0.37821575509399867, 0.31089212245300067 },
    /* RobidouxSharp: Sharper version of Robidoux */
    { CubicBC,   2.0, 1.105822933719019,
                      0.2620145123990142, 0.3689927438004929 },
    { Cosine,    1.0, 1.0, 0.0, 0.0 },      /* Low level cosine window     */
    { CubicBC,   2.0, 2.0, 1.0, 0.0 },      /* Cubic B-Spline (B=1,C=0)    */
    { SincFast,  3.0, 1.0, 0.0, 0.0 },      /* Lanczos, Integer Radius     */
  };
  /*
    The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
    function being used as a filter.  It is used by the "filter:lobes" expert
    setting and for 'lobes' for Jinc functions in the previous table.  This
    way users do not have to deal with the highly irrational lobe sizes of the
    Jinc filter.

    Values taken from
    http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    using Jv-function with v=1, then dividing by PI.
  */
  static double
    jinc_zeros[16] =
    {
      1.2196698912665045,
      2.2331305943815286,
      3.2383154841662362,
      4.2410628637960699,
      5.2427643768701817,
      6.2439216898644877,
      7.2447598687199570,
      8.2453949139520427,
      9.2458926849494673,
      10.246293348754916,
      11.246622794877883,
      12.246898461138105,
      13.247132522181061,
      14.247333735806849,
      15.247508563037300,
      16.247661874700962
    };

  /*
    Allocate resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
  if (resize_filter == (ResizeFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter));
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur = blur;   /* function argument blur factor (1.0) */
  /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
  if (cylindrical != MagickFalse && filter_type == SincFastFilter &&
      filter != SincFastFilter )
    filter_type=JincFilter;  /* 1D Windowed Sinc => 2D Windowed Jinc filters */

  /* Expert filter setting override */
  artifact=GetImageArtifact(image,"filter:filter");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        {
          /* Raw filter request - no window function. */
          filter_type=(FilterTypes) option;
          window_type=BoxFilter;
        }
      /* Filter override with a specific window function. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterTypes) option;
        }
    }
  else
    {
      /* Window specified, but no filter function?  Assume Sinc/Jinc. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;

          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              filter_type=cylindrical != MagickFalse ? JincFilter
                : SincFastFilter;
              window_type=(FilterTypes) option;
            }
        }
    }
  /*
    Assign the real functions to use for the filters selected.
  */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->window=filters[window_type].function;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickSignature;

  /* Filter Modifications for orthogonal/cylindrical usage */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(MagickRealType) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
      case LanczosRadiusFilter:
        /* Lanczos variants become Jinc-windowed-Jinc when cylindrical. */
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remain unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur *= (MagickRealType) 0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur *= (MagickRealType) 0.9549963639785485;
      break;
    /* case LanczosRadius: blur adjust is done after lobes */
    default:
      break;
  }

  /*
    Expert Option Modifications.
  */

  /* User Gaussian Sigma Override - no support change */
  if ((resize_filter->filter == Gaussian) ||
      (resize_filter->window == Gaussian) )
    {
      value=0.5;    /* gaussian sigma default, half pixel */
      artifact=GetImageArtifact(image,"filter:sigma");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      /* Define coefficients for Gaussian */
      resize_filter->coefficient[0]=value;                 /* note sigma too */
      resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
      resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value); /* normalization - not actually needed or used! */
      if ( value > 0.5 )
        resize_filter->support *= value/0.5;  /* increase support */
    }

  /* User Kaiser Alpha Override - no support change */
  if ((resize_filter->filter == Kaiser) ||
      (resize_filter->window == Kaiser) )
    {
      value=6.5; /* default beta value for Kaiser bessel windowing function */
      artifact=GetImageArtifact(image,"filter:alpha");  /* FUTURE: deprecate */
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      artifact=GetImageArtifact(image,"filter:kaiser-beta");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      artifact=GetImageArtifact(image,"filter:kaiser-alpha");
      if (artifact != (const char *) NULL)
        value=(MagickRealType) (StringToDouble(artifact,(char **) NULL)*MagickPI);
      /* Define coefficients for Kaiser Windowing Function */
      resize_filter->coefficient[0]=value;         /* alpha */
      resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */
    }

  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;

      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;
      resize_filter->support=(MagickRealType) lobes;
    }
  /* Convert a Jinc function lobes value to a real support value */
  if (resize_filter->filter == Jinc)
    {
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15];  /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long)resize_filter->support)-1];
      /* blur this filter so support is an integer value (lobes dependent) */
      if (filter_type == LanczosRadiusFilter)
        {
          resize_filter->blur *= floor(resize_filter->support)/
            resize_filter->support;
        }
    }
  /* Expert Blur Override */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  /* Clamp so later divisions by blur are safe. */
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(MagickRealType) MagickEpsilon;
  /* Expert override of the support setting */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping' window that
    calling operator is planning to actually use. (Expert override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Adjust window function scaling to match windowing support for weighting
    function.  This avoids a division on every filter call.
  */
  resize_filter->scale/=resize_filter->window_support;
  /*
   * Set Cubic Spline B,C values, calculate Cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ((resize_filter->filter == CubicBC) ||
      (resize_filter->window == CubicBC) )
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0;  /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c");  /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C;  /* Calculate B to get a Keys cubic filter. */
            }
        }
      /*
        Convert B,C values into Cubic Coefficients.  See CubicBC().
      */
      {
        const double
          twoB = B+B;

        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }
  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
    artifact=GetImageArtifact(image,"filter:verbose");
    if (IsMagickTrue(artifact))
      {
        double
          support,
          x;

        /*
          Set the weighting function properly when the weighting function may
          not exactly match the filter of the same name.  EG: a Point filter
          really uses a Box weighting function with a different support than
          is typically used.
        */
        if (resize_filter->filter == Box)       filter_type=BoxFilter;
        if (resize_filter->filter == Sinc)      filter_type=SincFilter;
        if (resize_filter->filter == SincFast)  filter_type=SincFastFilter;
        if (resize_filter->filter == Jinc)      filter_type=JincFilter;
        if (resize_filter->filter == CubicBC)   filter_type=CubicFilter;
        if (resize_filter->window == Box)       window_type=BoxFilter;
        if (resize_filter->window == Sinc)      window_type=SincFilter;
        if (resize_filter->window == SincFast)  window_type=SincFastFilter;
        if (resize_filter->window == Jinc)      window_type=JincFilter;
        if (resize_filter->window == CubicBC)   window_type=CubicFilter;
        /*
          Report Filter Details.
        */
        support=GetResizeFilterSupport(resize_filter);  /* practical_support */
        (void) FormatLocaleFile(stdout,"# Resampling Filter (for graphing)\n#\n");
        (void) FormatLocaleFile(stdout,"# filter = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,filter_type));
        (void) FormatLocaleFile(stdout,"# window = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,window_type));
        (void) FormatLocaleFile(stdout,"# support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->support);
        (void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->window_support);
        (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
          GetMagickPrecision(), (double)resize_filter->blur);
        if ( filter_type == GaussianFilter || window_type == GaussianFilter )
          (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
            GetMagickPrecision(), (double)resize_filter->coefficient[0]);
        if ( filter_type == KaiserFilter || window_type == KaiserFilter )
          (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
            GetMagickPrecision(), (double)resize_filter->coefficient[0]);
        (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
          GetMagickPrecision(), (double)support);
        if ( filter_type == CubicFilter || window_type == CubicFilter )
          (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
            GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
        (void) FormatLocaleFile(stdout,"\n");
        /*
          Output values of resulting filter graph -- for graphing filter
          result.
        */
        for (x=0.0; x <= support; x+=0.01f)
          (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,GetMagickPrecision(),
            (double) GetResizeFilterWeight(resize_filter,x));
        /*
          A final value so gnuplot can graph the 'stop' properly.
        */
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
          GetMagickPrecision(),0.0);
      }
    /* Output the above once only for each image - remove setting */
    (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e R e s i z e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  This is a shortcut function for a fast interpolative resize using mesh
%  interpolation.  It works well for small resizes of less than +/- 50%
%  of the original image size.  For larger resizing on images a full
%  filtered and slower resize function should be used instead.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /* Delegate to an interpolative resize using mesh pixel interpolation. */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l O r d e r O n e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 1.  This is used to create the Jinc() filter function below.
%
%  Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%     j1(x) = x*j1(x);
%
%  For x in (8,inf)
%
%     j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%  where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
%     cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%             =  1/sqrt(2) * (sin(x) - cos(x))
%     sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%             = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      MagickRealType BesselOrderOne(MagickRealType x)
%
%  A description of each parameter follows:
%
%    o x: MagickRealType value.
%
*/

#undef I0
static MagickRealType I0(MagickRealType x)
{
  MagickRealType
    sum,
    t,
    y;

  register ssize_t
    i;

  /*
    Zeroth order Bessel function of the first kind.  Power-series expansion
    sum_{k>=0} (x^2/4)^k/(k!)^2; terminates once a term drops below
    MagickEpsilon.  Used to normalize the Kaiser window in
    AcquireResizeFilter().
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((MagickRealType) i*i);
  }
  return(sum);
}

#undef J1
static MagickRealType J1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  /* Rational minimax approximation of J1 for small |x|; evaluated via
     Horner's rule in x*x.  Valid for x in (0,8]. */
  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
static MagickRealType P1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  /* Asymptotic-series helper p1 for large x (>8); polynomial in (8/x)^2. */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
static MagickRealType Q1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  /* Asymptotic-series helper q1 for large x (>8); polynomial in (8/x)^2. */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static MagickRealType BesselOrderOne(MagickRealType x)
{
  MagickRealType
    p,
    q;

  if (x == 0.0)
    return(0.0);
  p=x;  /* remember the sign: j1(-x) = -j1(x) */
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  /* Asymptotic form for x >= 8, using the sin/cos(x-3*pi/4) identities
     described in the header comment above. */
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroy the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* Invalidate the signature so any stale pointer trips the asserts above. */
  resize_filter->signature=(~MagickSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by the filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/
MagickExport MagickRealType GetResizeFilterSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* Practical support is the configured support scaled by the blur factor. */
  return(resize_filter->blur*resize_filter->support);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r W e i g h t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filters current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const MagickRealType x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickExport MagickRealType GetResizeFilterWeight(
  const ResizeFilter *resize_filter,const MagickRealType x)
{
  MagickRealType
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  x_blur=fabs((double) x)/resize_filter->blur;  /* X offset with blur scaling */
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    scale=1.0;  /* Point or Box Filter -- avoid division by zero */
  else
    {
      /* resize_filter->scale already includes 1/window_support (see
         AcquireResizeFilter), so no division is needed per call. */
      scale=resize_filter->scale;
      scale=resize_filter->window(x_blur*scale,resize_filter);
    }
  weight=scale*resize_filter->filter(x_blur,resize_filter);
  return(weight);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p o l a t i v e R e s i z e I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolativeResizeImage() resizes an image using the specified
%  interpolation method.
%
%  The format of the InterpolativeResizeImage method is:
%
%      Image *InterpolativeResizeImage(const Image *image,const size_t columns,
%        const size_t rows,const InterpolatePixelMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const InterpolatePixelMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Identity geometry: just clone the source. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Source-to-destination coordinate scale factors. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    PointInfo
      offset;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      continue;
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    GetMagickPixelPacket(image,&pixel);
    /* Pixel-center aligned sampling position in the source image. */
    offset.y=((MagickRealType) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      offset.x=((MagickRealType) x+0.5)*scale.x-0.5;
      (void) InterpolateMagickPixelPacket(image,image_view,method,offset.x,
        offset.y,&pixel,exception);
      SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      continue;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}

#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i q u i d R e s c a l e I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiquidRescaleImage() rescales image with seam carving.
%
%  The format of the LiquidRescaleImage method is:
%
%      Image *LiquidRescaleImage(const Image *image,
%        const size_t columns,const size_t rows,
%        const double delta_x,const double rigidity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the rescaled image.
%
%    o rows: the number of rows in the rescaled image.
%
%    o delta_x: maximum seam transversal step (0 means straight seams).
%
%    o rigidity: introduce a bias for non-straight seams (typically 0).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *rescale_view;

  const char
    *map;

  guchar
    *packet;

  Image
    *rescale_image;

  int
    x,
    y;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  MemoryInfo
    *pixel_info;

  unsigned char
    *pixels;

  /*
    Liquid rescale image.
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    liblqr cannot carve to tiny sizes; fall back to a conventional resize.
  */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
  if ((columns >= (2*image->columns)) || (rows >= (2*image->rows)))
    {
      Image
        *resize_image;

      size_t
        height,
        width;

      /*
        Honor liquid resize size limitations: the carver cannot grow a
        dimension to 2x or more, so first resize conventionally to within
        range, then recurse to carve the remainder.
      */
      for (width=image->columns; columns >= (2*width-1); width*=2);
      for (height=image->rows; rows >= (2*height-1); height*=2);
      resize_image=ResizeImage(image,width,height,image->filter,image->blur,
        exception);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x,
        rigidity,exception);
      resize_image=DestroyImage(resize_image);
      return(rescale_image);
    }
  /*
    Select the export channel map that matches the image's colorspace and
    matte channel; strlen(map) is the per-pixel channel count below.
  */
  map="RGB";
  if (image->matte != MagickFalse)
    map="RGBA";
  if (image->colorspace == CMYKColorspace)
    {
      map="CMYK";
      if (image->matte != MagickFalse)
        map="CMYKA";
    }
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*strlen(map)*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
  status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel,
    pixels,exception);
  if (status == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  carver=lqr_carver_new(pixels,image->columns,image->rows,strlen(map));
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
lqr_carver_set_preserve_input_image(carver); lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity); lqr_status=lqr_carver_resize(carver,columns,rows); (void) lqr_status; rescale_image=CloneImage(image,lqr_carver_get_width(carver), lqr_carver_get_height(carver),MagickTrue,exception); if (rescale_image == (Image *) NULL) { pixel_info=RelinquishVirtualMemory(pixel_info); return((Image *) NULL); } if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse) { InheritException(exception,&rescale_image->exception); rescale_image=DestroyImage(rescale_image); return((Image *) NULL); } GetMagickPixelPacket(rescale_image,&pixel); (void) lqr_carver_scan_reset(carver); rescale_view=AcquireAuthenticCacheView(rescale_image,exception); while (lqr_carver_scan(carver,&x,&y,&packet) != 0) { register IndexPacket *restrict rescale_indexes; register PixelPacket *restrict q; q=QueueCacheViewAuthenticPixels(rescale_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; rescale_indexes=GetCacheViewAuthenticIndexQueue(rescale_view); pixel.red=QuantumRange*(packet[0]/255.0); pixel.green=QuantumRange*(packet[1]/255.0); pixel.blue=QuantumRange*(packet[2]/255.0); if (image->colorspace != CMYKColorspace) { if (image->matte == MagickFalse) pixel.opacity=QuantumRange*(packet[3]/255.0); } else { pixel.index=QuantumRange*(packet[3]/255.0); if (image->matte == MagickFalse) pixel.opacity=QuantumRange*(packet[4]/255.0); } SetPixelPacket(rescale_image,&pixel,q,rescale_indexes); if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse) break; } rescale_view=DestroyCacheView(rescale_view); /* Relinquish resources. 
*/
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/*
  Stub when the liblqr delegate is not built in: report a missing-delegate
  error and return no image.
*/
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag  "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  Image
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize magnified image attributes.
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* destination is exactly 2x in each dimension */
  magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
    exception);
  if (magnify_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Magnify image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,magnify_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict magnify_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* each source row y produces destination rows 2y and 2y+1 */
    q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    magnify_indexes=GetCacheViewAuthenticIndexQueue(magnify_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity[9];

      register const IndexPacket
        *restrict indexes;

      register const PixelPacket
        *restrict p;

      register PixelPacket
        *restrict r;

      register ssize_t
        i;

      /*
        Magnify this row of pixels: fetch the 3x3 neighborhood centered on
        (x,y); p[4] is the center, p[1]/p[7] the vertical and p[3]/p[5] the
        horizontal neighbors.
      */
      p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 9; i++)
        intensity[i]=GetPixelIntensity(image,p+i);
      /* r walks the 2x2 destination quad; the += jump drops to row 2y+1 */
      r=q;
      if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
          (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
        {
          /*
            Clone center pixel.
          */
          *r=p[4];
          r++;
          *r=p[4];
          r+=(magnify_image->columns-1);
          *r=p[4];
          r++;
          *r=p[4];
        }
      else
        {
          /*
            Selectively clone pixel.
*/
          if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
            *r=p[3];
          else
            *r=p[4];
          r++;
          if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
            *r=p[5];
          else
            *r=p[4];
          r+=(magnify_image->columns-1);
          if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
            *r=p[3];
          else
            *r=p[4];
          r++;
          if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
            *r=p[5];
          else
            *r=p[4];
        }
      if (indexes != (const IndexPacket *) NULL)
        {
          register IndexPacket
            *r;

          /*
            Magnify the colormap indexes with the same 2x2 decision
            pattern used for the pixels above.
          */
          r=magnify_indexes;
          if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
              (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
            {
              /*
                Clone center pixel.
              */
              *r=indexes[4];
              r++;
              *r=indexes[4];
              r+=(magnify_image->columns-1);
              *r=indexes[4];
              r++;
              *r=indexes[4];
            }
          else
            {
              /*
                Selectively clone pixel.
              */
              if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
                *r=indexes[3];
              else
                *r=indexes[4];
              r++;
              if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
                *r=indexes[5];
              else
                *r=indexes[4];
              r+=(magnify_image->columns-1);
              if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
                *r=indexes[3];
              else
                *r=indexes[4];
              r++;
              if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
                *r=indexes[5];
              else
                *r=indexes[4];
            }
          magnify_indexes+=2;
        }
      q+=2;
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MagnifyImage)
#endif
        proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M i n i f y I m a g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MinifyImage() is a
convenience method that scales an image proportionally to
%  half its size.
%
%  The format of the MinifyImage method is:
%
%      Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Delegate to ResizeImage: half the dimensions with a spline filter.
  */
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,1.0,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e s a m p l e I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResampleImage() resize image in terms of its pixel size, so that when
%  displayed at the given resolution it will be the same size in terms of
%  real world units as the original image at the original resolution.
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  double
    source_x_density,
    source_y_density;

  Image
    *resample_image;

  size_t
    resample_columns,
    resample_rows;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    An unset source resolution defaults to 72 DPI; the new geometry keeps
    the image's real-world size at the requested resolution.
  */
  source_x_density=image->x_resolution == 0.0 ? 72.0 : image->x_resolution;
  source_y_density=image->y_resolution == 0.0 ? 72.0 : image->y_resolution;
  resample_columns=(size_t) (x_resolution*image->columns/source_x_density+0.5);
  resample_rows=(size_t) (y_resolution*image->rows/source_y_density+0.5);
  resample_image=ResizeImage(image,resample_columns,resample_rows,filter,blur,
    exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->x_resolution=x_resolution;
      resample_image->y_resolution=y_resolution;
    }
  return(resample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e s i z e I m a g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireFilterInfo()).
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, a image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to a Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const size_t columns,
%        const size_t rows,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
% % o rows: the number of rows in the scaled image. % % o filter: Image filter to use. % % o blur: the blur factor where > 1 is blurry, < 1 is sharp. Typically set % this to 1.0. % % o exception: return any errors or warnings in this structure. % */ typedef struct _ContributionInfo { MagickRealType weight; ssize_t pixel; } ContributionInfo; static ContributionInfo **DestroyContributionThreadSet( ContributionInfo **contribution) { register ssize_t i; assert(contribution != (ContributionInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (contribution[i] != (ContributionInfo *) NULL) contribution[i]=(ContributionInfo *) RelinquishAlignedMemory( contribution[i]); contribution=(ContributionInfo **) RelinquishMagickMemory(contribution); return(contribution); } static ContributionInfo **AcquireContributionThreadSet(const size_t count) { register ssize_t i; ContributionInfo **contribution; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads, sizeof(*contribution)); if (contribution == (ContributionInfo **) NULL) return((ContributionInfo **) NULL); (void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution)); for (i=0; i < (ssize_t) number_threads; i++) { contribution[i]=(ContributionInfo *) MagickAssumeAligned( AcquireAlignedMemory(count,sizeof(**contribution))); if (contribution[i] == (ContributionInfo *) NULL) return(DestroyContributionThreadSet(contribution)); } return(contribution); } static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter, const Image *image,Image *resize_image,const MagickRealType x_factor, const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception) { #define 
ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **restrict contributions;   /* one weight buffer per worker thread */

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,      /* filter scale; later replaced by its reciprocal */
    support;    /* filter support radius in source pixels */

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    MagickRealType
      bisect,    /* center of the filter window in source coordinates */
      density;   /* sum of weights, used to normalize */

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ContributionInfo
      *restrict contribution;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    bisect=(MagickRealType) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    /*
      Compute filter weights for source columns [start,stop).
    */
    contribution=contributions[GetOpenMPThreadId()];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Read the contributing source columns (full height) and write one
      destination column.
    */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha;

      register ssize_t
        i;

      ssize_t
        j;   /* offset of source pixel (row y, contributing column i) in p */

      pixel=zero;
      if (image->matte == MagickFalse)
        {
          for (i=0; i < n; i++)
          {
            j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[i].pixel-contribution[0].pixel);
            alpha=contribution[i].weight;
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=alpha*GetPixelOpacity(p+j);
          }
          SetPixelRed(q,ClampToQuantum(pixel.red));
          SetPixelGreen(q,ClampToQuantum(pixel.green));
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                  (contribution[i].pixel-contribution[0].pixel);
                alpha=contribution[i].weight;
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+y,ClampToQuantum(pixel.index));
            }
        }
      else
        {
          double
            gamma;   /* sum of alpha-scaled weights: undoes premultiply */

          /*
            Matte channel present: blend color channels premultiplied by
            alpha, then divide by the accumulated alpha (gamma).
          */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[i].pixel-contribution[0].pixel);
            alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
            gamma+=alpha;
          }
          gamma=PerceptibleReciprocal(gamma);
          SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                  (contribution[i].pixel-contribution[0].pixel);
                alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+y,ClampToQuantum(gamma*pixel.index));
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /*
            Colormapped output: pick the index of the nearest contributing
            source pixel rather than a blended value.
          */
          i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-
            1.0)+0.5);
          j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[i-start].pixel-contribution[0].pixel);
          SetPixelIndex(resize_indexes+y,GetPixelIndex(indexes+j));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HorizontalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image
*resize_image,const MagickRealType y_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **restrict contributions;   /* one weight buffer per worker thread */

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,      /* filter scale; later replaced by its reciprocal */
    support;    /* filter support radius in source pixels */

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickRealType
      bisect,    /* center of the filter window in source coordinates */
      density;   /* sum of weights, used to normalize */

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ContributionInfo
      *restrict contribution;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    bisect=(MagickRealType) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    /*
      Compute filter weights for source rows [start,stop).
    */
    contribution=contributions[GetOpenMPThreadId()];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Read the contributing source rows (full width) and write one
      destination row.
    */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha;

      register ssize_t
        i;

      ssize_t
        j;   /* offset of source pixel (contributing row i, column x) in p */

      pixel=zero;
      if (image->matte == MagickFalse)
        {
          for (i=0; i < n; i++)
          {
            j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
              image->columns+x);
            alpha=contribution[i].weight;
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=alpha*GetPixelOpacity(p+j);
          }
          SetPixelRed(q,ClampToQuantum(pixel.red));
          SetPixelGreen(q,ClampToQuantum(pixel.green));
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
                  image->columns+x);
                alpha=contribution[i].weight;
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+x,ClampToQuantum(pixel.index));
            }
        }
      else
        {
          double
            gamma;   /* sum of alpha-scaled weights: undoes premultiply */

          /*
            Matte channel present: blend color channels premultiplied by
            alpha, then divide by the accumulated alpha (gamma).
          */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
              image->columns+x);
            alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
            gamma+=alpha;
          }
          gamma=PerceptibleReciprocal(gamma);
          SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
                  image->columns+x);
                alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /*
            Colormapped output: pick the index of the nearest contributing
            source pixel rather than a blended value.
          */
          i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-
            1.0)+0.5);
          j=(ssize_t) ((contribution[i-start].pixel-contribution[0].pixel)*
            image->columns+x);
          SetPixelIndex(resize_indexes+x,GetPixelIndex(indexes+j));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_VerticalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
  FilterTypes
    filter_type;

  Image
    *filter_image,     /* intermediate: resized in one dimension only */
    *resize_image;

  MagickOffsetType
    offset;

  MagickRealType
    x_factor,
    y_factor;

  MagickSizeType
    span;              /* total progress span over both passes */

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter) && (blur == 1.0))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return(resize_image);
  /*
    Acquire resize filter.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  /* resize the dimension that shrinks more (or grows less) second */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    return(DestroyImage(resize_image));
  /*
    Default filter: Lanczos; Point when the geometry is unchanged; Mitchell
    for colormapped, matted, or enlarged images.
  */
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse,
    exception);
  /*
    Resize image.
  */
  offset=0;
  if (x_factor > y_factor)
    {
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S a m p l e I m a g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SampleImage() scales an image to the desired dimensions with pixel
%  sampling.  Unlike other scaling methods, this method does not introduce
%  any additional color into the scaled image.
%
%  The format of the SampleImage method is:
%
%      Image *SampleImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the sampled image.
%
%    o rows: the number of rows in the sampled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag  "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    x;

  ssize_t
    *x_offset,     /* precomputed source column for each sampled column */
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Check for posible user defined sampling offset Artifact The default sampling offset is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x=0; x < (ssize_t) sample_image->columns; x++) x_offset[x]=(ssize_t) ((((double) x+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,sample_image,1,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict sample_indexes; register PixelPacket *restrict q; register ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); sample_indexes=GetCacheViewAuthenticIndexQueue(sample_view); /* Sample each column. 
*/ for (x=0; x < (ssize_t) sample_image->columns; x++) *q++=p[x_offset[x]]; if ((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) sample_image->columns; x++) SetPixelIndex(sample_indexes+x,GetPixelIndex(indexes+x_offset[x])); if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SampleImage) #endif proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; if (status == MagickFalse) sample_image=DestroyImage(sample_image); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
%
*/
/*
  ScaleImage() resamples by accumulating weighted pixel contributions: rows
  are folded into y_vector with fractional weights (span.y/scale.y), then
  each scanline is folded across columns the same way (span.x/scale.x).
  Colors are premultiplied by alpha before accumulation and unpremultiplied
  on output.
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag  "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  MagickPixelPacket
    pixel,
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector,
    zero;

  MagickRealType
    alpha;

  PointInfo
    scale,
    span;

  register ssize_t
    i;

  ssize_t
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&scale_image->exception);
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When no vertical scaling is needed, scanline aliases
    x_vector and must not be freed twice (see the matching Relinquish below).
  */
  x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
      sizeof(*scanline));
  scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
    scale_image->columns,sizeof(*scale_scanline));
  y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*y_vector));
  /* NOTE(review): on partial allocation failure the buffers that did
     succeed are leaked before the exception is thrown. */
  if ((scanline == (MagickPixelPacket *) NULL) ||
      (scale_scanline == (MagickPixelPacket *) NULL) ||
      (x_vector == (MagickPixelPacket *) NULL) ||
      (y_vector == (MagickPixelPacket *) NULL))
    {
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) ResetMagickMemory(y_vector,0,(size_t) image->columns*
    sizeof(*y_vector));
  GetMagickPixelPacket(image,&pixel);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  i=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict scale_indexes;

    register MagickPixelPacket
      *restrict s,
      *restrict t;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    scale_indexes=GetCacheViewAuthenticIndexQueue(scale_view);
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.
        */
        p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
          exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            break;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(p);
          /* Premultiply color channels by alpha before accumulation. */
          x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
          x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
          x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
          if (image->matte != MagickFalse)
            x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
          if (indexes != (IndexPacket *) NULL)
            x_vector[x].index=(MagickRealType) (alpha*GetPixelIndex(indexes+x));
          p++;
        }
      }
    else
      {
        /*
          Scale Y direction.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
                exception);
              if (p == (const PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (image->matte != MagickFalse)
                  alpha=QuantumScale*GetPixelAlpha(p);
                x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
                x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
                x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
                if (image->matte != MagickFalse)
                  x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
                if (indexes != (IndexPacket *) NULL)
                  x_vector[x].index=(MagickRealType) (alpha*
                    GetPixelIndex(indexes+x));
                p++;
              }
              number_rows++;
            }
          /* Fold the full weight of this source row into the accumulator. */
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            y_vector[x].red+=scale.y*x_vector[x].red;
            y_vector[x].green+=scale.y*x_vector[x].green;
            y_vector[x].blue+=scale.y*x_vector[x].blue;
            if (scale_image->matte != MagickFalse)
              y_vector[x].opacity+=scale.y*x_vector[x].opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              y_vector[x].index+=scale.y*x_vector[x].index;
          }
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
              exception);
            if (p == (const PixelPacket *) NULL)
              {
                status=MagickFalse;
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (image->matte != MagickFalse)
                alpha=QuantumScale*GetPixelAlpha(p);
              x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
              x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
              x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
              if (image->matte != MagickFalse)
                x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
              if (indexes != (IndexPacket *) NULL)
                x_vector[x].index=(MagickRealType) (alpha*
                  GetPixelIndex(indexes+x));
              p++;
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /* Emit the accumulated row plus the partial contribution (span.y)
           of the current source row, then reset the accumulator. */
        s=scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.red=y_vector[x].red+span.y*x_vector[x].red;
          pixel.green=y_vector[x].green+span.y*x_vector[x].green;
          pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
          if (image->matte != MagickFalse)
            pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            pixel.index=y_vector[x].index+span.y*x_vector[x].index;
          s->red=pixel.red;
          s->green=pixel.green;
          s->blue=pixel.blue;
          if (scale_image->matte != MagickFalse)
            s->opacity=pixel.opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            s->index=pixel.index;
          s++;
          y_vector[x]=zero;
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        s=scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*(QuantumRange-s->opacity);
          /* Undo the alpha premultiplication applied on input. */
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*s->red));
          SetPixelGreen(q,ClampToQuantum(alpha*s->green));
          SetPixelBlue(q,ClampToQuantum(alpha*s->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(s->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*s->index));
          q++;
          s++;
        }
      }
    else
      {
        /*
          Scale X direction.
        */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        /* Flush any remaining partial column into the last output pixel. */
        if (span.x > 0)
          {
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        if ((next_column == MagickFalse) && ((ssize_t) (t-scale_scanline) <
            (ssize_t) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image.
        */
        t=scale_scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*(QuantumRange-t->opacity);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*t->red));
          SetPixelGreen(q,ClampToQuantum(alpha*t->green));
          SetPixelBlue(q,ClampToQuantum(alpha*t->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(t->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*t->index));
          t++;
          q++;
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T h u m b n a i l   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThumbnailImage() changes the size of an image to the given dimensions and
%  removes any associated profiles.
The goal is to produce small low cost
%  thumbnail images suited for display on the Web.
%
%  The format of the ThumbnailImage method is:
%
%      Image *ThumbnailImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    value[MaxTextExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  size_t
    version;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Choose a resize strategy: resize directly for modest reductions or tiny
    targets; otherwise pre-shrink with cheap pixel sampling first.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,image->blur,
      exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,
        image->blur,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          image->blur,exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Attach freedesktop.org thumbnail metadata (Thumb::*) properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MaxTextExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  /*
    Bug fix: a stray unconditional FormatLocaleString() call formatted
    attributes.st_mtime here, reading `attributes` even when
    GetPathAttributes() failed and left it uninitialized; its result was
    immediately overwritten below, so the call is removed.
  */
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
  (void) ConcatenateMagickString(value,"B",MaxTextExtent);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  (void) SetImageProperty(thumbnail_image,"software",
    GetMagickVersion(&version));
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}
calib.c
/* Copyright 2013-2016. The Regents of the University of California.
 * Copyright 2016. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
 * 2013 Dara Bahri <dbahri123@gmail.com>
 * 2015-2016 Siddharth Iyer <sid8795@gmail.com>
 *
 *
 * Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M.
 * ESPIRiT - An Eigenvalue Approach to Autocalibrating Parallel MRI: Where SENSE
 * meets GRAPPA. Magn Reson Med, 71:990-1001 (2014)
 *
 * Iyer S, Ong F, Lustig M.
 * Towards A Parameter Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation.
 * Presented in the session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
 * http://www.ismrm.org/16/program_files/O86.htm
 *
 */

#include <assert.h>
#include <complex.h>
#include <math.h>
#include <stdbool.h>

#include "num/multind.h"
#include "num/fft.h"
#include "num/flpmath.h"
#include "num/linalg.h"
#include "num/lapack.h"
#include "num/casorati.h"
#include "num/rand.h"

#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/resize.h"
#include "misc/debug.h"
#include "misc/utils.h"

#include "calib/calmat.h"
#include "calib/cc.h"
#include "calib/softweight.h"

#include "calib.h"

#ifdef USE_CUDA
#include "calib/calibcu.h"
#endif

#if 0
#define CALMAT_SVD
#endif

#if 0
#define FLIP
#endif

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif


/*
 * Compute the M largest eigenpairs of an N x N Hermitian matrix using
 * orthogonal (subspace) iteration.  Only the lower triangle of `matrix`
 * needs to be filled in; the upper triangle is reconstructed here by
 * conjugation.  On return, the eigenvectors overwrite the first M rows
 * of `matrix` and the eigenvalues are stored in `val`.
 *
 * NOTE(review): `mout` is passed to orthiter() without initialization
 * (mat_identity() is commented out) — presumably orthiter() initializes
 * its iterate internally; confirm against num/linalg.
 */
static void eigen_herm3(int M, int N, float val[M], complex float matrix[N][N]) // ordering might be different to herm2
{
	complex float mout[M][N];

	// mirror the lower triangle into the upper triangle (Hermitian)
	for (int li = 0; li < N; li++)
		for (int lj = 0; lj < li; lj++)
			matrix[lj][li] = conj(matrix[li][lj]);

	//mat_identity(M, N, mout);
	orthiter(M, N, 30, val, mout, matrix);

	for (int i = 0; i < M; i++)
		for (int j = 0; j < N; j++)
			matrix[i][j] = mout[i][j];
}


/*
 * Smooth sigmoid ramp from 0 to 1 on [-1, 1]; clamps outside that range.
 */
static float scurve(float x)
{
	if (x <= -1.)
		return 0.;

	if (x >= 1.)
		return 1.;

	return 0.5 * (1. + 2. * x / (1. + powf(x, 2.)));
}

/* Soft crop weight: smooth transition between the threshold and 1. */
static float crop_weight_function(float crth, float val)
{
	return scurve((sqrtf(val) - crth) / (1. - crth));
}

/* Hard crop weight: binary mask at the threshold. */
static float crop_thresh_function(float crth, float val)
{
	return (val <= crth) ? 0. : 1.;
}

typedef float (*weight_function)(float crth, float val);

/*
 * Scale each map pixel of `ptr` (dims: x, y, z, coils, maps) by a weight
 * derived from the corresponding eigenvalue in `map` via `fun`.  The same
 * weight is applied across the coil dimension.
 */
static void crop_weight(const long dims[DIMS], complex float* ptr, weight_function fun, float crth, const complex float* map)
{
	long xx = dims[0];
	long yy = dims[1];
	long zz = dims[2];
	long cc = dims[3];
	long mm = dims[4];

	assert(DIMS > 5);
	assert(1 == md_calc_size(DIMS - 5, dims + 5));

	for (long m = 0; m < mm; m++) {
#pragma omp parallel for
		for (long k = 0; k < zz; k++) {
			for (long i = 0; i < yy; i++) {
				for (long j = 0; j < xx; j++) {

					float val = cabsf(map[((m * zz + k) * yy + i) * xx + j]);

					for (long c = 0; c < cc; c++)
						ptr[(((m * cc + c) * zz + k) * yy + i) * xx + j] *= fun(crth, val);
				}
			}
		}
	}
}

/* Crop sensitivities using either the soft or the hard weight function. */
void crop_sens(const long dims[DIMS], complex float* ptr, bool soft, float crth, const complex float* map)
{
	crop_weight(dims, ptr, soft ? crop_weight_function : crop_thresh_function, crth, map);
}


/**
 * sure_crop - This determines the crop-threshold to use as described in the talk: "Towards A Parameter
 *             Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation". This was given at the
 *             session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
 *
 * Parameters:
 *  var         - Estimated variance in data.
 *  evec_dims   - The eigenvector dimensions.
 *  evec_data   - The eigenvectors.
 *  eptr        - The eigenvalues.
 *  calreg_dims - Dimension of the calibration region.
 *  calreg      - Calibration data.
*/
static float sure_crop(float var, const long evec_dims[5], complex float* evec_data, complex float* eptr, const long calreg_dims[5], const complex float* calreg)
{
	long num_maps = evec_dims[4];

	// Construct low-resolution image
	long im_dims[5];
	md_select_dims(5, 15, im_dims, evec_dims);

	complex float* im = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);
	md_clear(5, im_dims, im, CFL_SIZE);

	md_resize_center(5, im_dims, im, calreg_dims, calreg, CFL_SIZE);
	ifftuc(5, im_dims, FFT_FLAGS, im, im);

	// Temporary vector for crop dimensions
	long cropdims[5];
	md_select_dims(5, 15, cropdims, calreg_dims);
	cropdims[4] = num_maps;

	// Eigenvectors (M)
	complex float* M = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);
	md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

	// Temporary eigenvector holder to hold low resolution maps
	complex float* LM = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);

	// Temporary holder for projection calreg
	complex float* TC = md_alloc_sameplace(5, calreg_dims, CFL_SIZE, calreg);

	// Temporary holder to hold low resolution calib maps
	complex float* CM = md_alloc_sameplace(5, cropdims, CFL_SIZE, calreg);

	// Eigenvalues (W)
	long W_dims[5];
	md_select_dims(5, 23, W_dims, evec_dims);

	complex float* W = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);
	md_copy(5, W_dims, W, eptr, CFL_SIZE);

	// Place holder for the inner product result
	complex float* ip = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);

	// Place holder for the projection result
	complex float* proj = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);

	// Place holder for divergence term
	long div_dims[5] = MD_INIT_ARRAY(5, 1);
	complex float* div = md_alloc_sameplace(5, div_dims, CFL_SIZE, calreg);

	// Calculating strides.
	long str1_ip[5];
	long str2_ip[5];
	long stro_ip[5];

	md_calc_strides(5, str1_ip, im_dims, CFL_SIZE);
	md_calc_strides(5, str2_ip, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_ip, W_dims, CFL_SIZE);

	long str1_proj[5];
	long str2_proj[5];
	long stro_proj[5];

	md_calc_strides(5, str1_proj, W_dims, CFL_SIZE);
	md_calc_strides(5, str2_proj, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_proj, im_dims, CFL_SIZE);

	long str1_div[5];
	long str2_div[5];
	long stro_div[5];

	md_calc_strides(5, str1_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, str2_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_div, div_dims, CFL_SIZE);

	// Broadcast dims for the fmac operations below.
	long tdims_ip[5];
	long tdims_proj[5];

	for (int i = 0; i < 5; i++) {

		assert((im_dims[i] == evec_dims[i]) || (1 == im_dims[i]) || (1 == evec_dims[i]));
		assert((W_dims[i] == evec_dims[i]) || (1 == W_dims[i]) || (1 == evec_dims[i]));

		tdims_ip[i] = (1 == im_dims[i]) ? evec_dims[i] : im_dims[i];
		tdims_proj[i] = (1 == W_dims[i]) ? evec_dims[i] : W_dims[i];
	}

	// Starting parameter sweep with SURE.
	// The threshold c is swept with step s; the step direction is halved
	// and reversed whenever the estimated MSE stops improving.
	float mse = -1.;
	float old_mse = 0.;

	float s = -0.1;
	float c = 0.99;

	long ctr1 = 0;
	long ctr2 = 0;

	debug_printf(DP_INFO, "---------------------------------------------\n");
	debug_printf(DP_INFO, "| CTR1 | CTR2 | Crop | Est. MSE |\n");
	debug_printf(DP_INFO, "---------------------------------------------\n");

	while (fabs(s) > 1.E-4) {

		ctr1++;

		while ( (c < 0.999) && (c > 0.001) && ( (ctr2 <= 1) || (mse < old_mse))) {

			ctr2++;

			md_clear(5, W_dims, ip, CFL_SIZE);
			md_clear(5, im_dims, proj, CFL_SIZE);
			md_clear(5, div_dims, div, CFL_SIZE);
			md_clear(5, evec_dims, M, CFL_SIZE);
			md_clear(5, evec_dims, LM, CFL_SIZE);
			md_clear(5, calreg_dims, TC, CFL_SIZE);

			md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

			old_mse = mse;
			mse = 0.;

			// Hard-crop the maps at the current threshold c.
			crop_weight(evec_dims, M, crop_thresh_function, c, W);

			md_zfmacc2(5, tdims_ip, stro_ip, ip, str1_ip, im, str2_ip, M);

			// Projection.
			md_zfmac2(5, tdims_proj, stro_proj, proj, str1_proj, ip, str2_proj, M);

			fftuc(5, im_dims, FFT_FLAGS, proj, proj);

			// Low res proj img.
			md_resize_center(5, calreg_dims, TC, im_dims, proj, CFL_SIZE);
			md_resize_center(5, im_dims, proj, calreg_dims, TC, CFL_SIZE);

			ifftuc(5, im_dims, FFT_FLAGS, proj, proj);

			// Data-fidelity part of the SURE estimate.
			for (long jdx = 0; jdx < md_calc_size(5, im_dims); jdx++)
				mse += powf(cabsf(im[jdx] - proj[jdx]), 2.);

			fftuc(5, evec_dims, FFT_FLAGS, LM, M);

			// low-res maps .
			md_resize_center(5, cropdims, CM, evec_dims, LM, CFL_SIZE);
			md_resize_center(5, evec_dims, LM, cropdims, CM, CFL_SIZE);

			ifftuc(5, evec_dims, FFT_FLAGS, LM, LM);

			md_zfmacc2(5, evec_dims, stro_div, div, str1_div, LM, str2_div, LM);

			// Calc SURE div using low res maps.
			mse += 2. * var * crealf(*div);

			if (ctr2 == 1)
				debug_printf(DP_INFO, "| %4ld | %4ld | %0.4f | %0.12e |\n", ctr1, ctr2, c, mse);
			else
				debug_printf(DP_INFO, "| | %4ld | %0.4f | %0.12e |\n", ctr2, c, mse);

			c = c + s;
		}

		// Back up one step, halve and reverse the search direction.
		c -= s;
		ctr2 = 0;
		s = -s / 2;
		c += s;
	}

	c = c + s;

	debug_printf(DP_INFO, "---------------------------------------------\n");

	md_free(im);
	md_free(TC);
	md_free(CM);
	md_free(M);
	md_free(LM);
	md_free(W);
	md_free(ip);
	md_free(proj);
	md_free(div);

	debug_printf(DP_DEBUG1, "Calculated c: %.4f\n", c);

	return c;
}


/*
 * First calibration step: compute null-space kernels from the calibration
 * region and reduce them to the point-wise image-domain covariance.
 */
void calone(const struct ecalib_conf* conf, const long cov_dims[4], complex float* imgcov, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data)
{
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

#if 1
	long nskerns_dims[5];
	complex float* nskerns;

	compute_kernels(conf, nskerns_dims, &nskerns, SN, svals, calreg_dims, data);
#else
	// alternative: SPIRiT kernel (disabled)
	long channels = calreg_dims[3];

	long kx = conf->kdims[0];
	long ky = conf->kdims[1];
	long kz = conf->kdims[2];

	long nskerns_dims[5] = { kx, ky, kz, channels, 0 };
	long N = md_calc_size(4, nskerns_dims);

	assert(N > 0);
	nskerns_dims[4] = N;

	complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE);

	long nr_kernels = channels;
	nskerns_dims[4] = channels;
	spirit_kernel(nskerns_dims, nskerns, calreg_dims, data);
#endif

	compute_imgcov(cov_dims, imgcov, nskerns_dims, nskerns);

	md_free(nskerns);
}


/* calculate
point-wise maps
 *
 */
void eigenmaps(const long out_dims[DIMS], complex float* optr, complex float* eptr, const complex float* imgcov2, const long msk_dims[3], const bool* msk, bool orthiter, bool ecal_usegpu)
{
#ifdef USE_CUDA
	if (ecal_usegpu) {

		//FIXME cuda version should be able to return sensitivities for a subset of image-space points
		assert(!msk);
		eigenmapscu(out_dims, optr, eptr, imgcov2);
		return;
	}
#else
	assert(!ecal_usegpu);
#endif

	long channels = out_dims[3];
	long maps = out_dims[4];

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(maps <= channels);

	long xx = out_dims[0];
	long yy = out_dims[1];
	long zz = out_dims[2];

	float scale = 1.; // for some reason, not scaled here

	if (msk_dims) {

		assert(msk_dims[0] == xx);
		assert(msk_dims[1] == yy);
		assert(msk_dims[2] == zz);
	}

	md_clear(5, out_dims, optr, CFL_SIZE);

	// Per-voxel eigendecomposition of the (packed) channel covariance.
#pragma omp parallel for collapse(3)
	for (long k = 0; k < zz; k++) {
		for (long j = 0; j < yy; j++) {
			for (long i = 0; i < xx; i++) {

				if (!msk || msk[i + xx * (j + yy * k)]) {

					float val[channels];
					complex float cov[channels][channels];

					complex float tmp[channels * (channels + 1) / 2];

					// gather the packed lower-triangular covariance for this voxel
					for (long l = 0; l < channels * (channels + 1) / 2; l++)
						tmp[l] = imgcov2[((l * zz + k) * yy + j) * xx + i] / scale;

					unpack_tri_matrix(channels, cov, tmp);

					if (orthiter)
						eigen_herm3(maps, channels, val, cov);
					else
						lapack_eig(channels, val, cov);

					// copy the `maps` dominant eigenvectors/values;
					// eigen_herm3 and lapack_eig order results differently
					for (long u = 0; u < maps; u++) {

						long ru = (orthiter ? maps : channels) - 1 - u;

						for (long v = 0; v < channels; v++)
							optr[((((u * channels + v) * zz + k) * yy + j) * xx + i)] = cov[ru][v];

						if (NULL != eptr)
							eptr[((u * zz + k) * yy + j) * xx + i] = val[ru];
					}
				}
			}
		}
	}
}


/*
 * Second calibration step: zero-pad the covariance to the output grid and
 * run the point-wise eigendecomposition to obtain sensitivity maps.
 */
void caltwo(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* emaps, const long in_dims[4], complex float* in_data, const long msk_dims[3], const bool* msk)
{
	long xx = out_dims[0];
	long yy = out_dims[1];
	long zz = out_dims[2];

	long xh = in_dims[0];
	long yh = in_dims[1];
	long zh = in_dims[2];

	long channels = out_dims[3];
	long cosize = channels * (channels + 1) / 2;

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(in_dims[3] == cosize);

	long cov_dims[4] = { xh, yh, zh, cosize };
	long covbig_dims[4] = { xx, yy, zz, cosize };

	assert(((xx == 1) && (xh == 1)) || (xx >= xh));
	assert(((yy == 1) && (yh == 1)) || (yy >= yh));
	assert(((zz == 1) && (zh == 1)) || (zz >= zh));

	assert((1 == xh) || (0 == xh % 2));
	assert((1 == yh) || (0 == yh % 2));
	assert((1 == zh) || (0 == zh % 2));

	complex float* imgcov2 = md_alloc(4, covbig_dims, CFL_SIZE);

	debug_printf(DP_DEBUG1, "Resize...\n");

	sinc_zeropad(4, covbig_dims, imgcov2, cov_dims, in_data);

	debug_printf(DP_DEBUG1, "Point-wise eigen-decomposition...\n");

	eigenmaps(out_dims, out_data, emaps, imgcov2, msk_dims, msk, conf->orthiter, conf->usegpu);

	md_free(imgcov2);
}

/*
 * Compute the covariance dimensions (2x kernel size per spatial dim,
 * packed-triangular channel dimension) for the given configuration.
 */
void calone_dims(const struct ecalib_conf* conf, long cov_dims[4], long channels)
{
	long kx = conf->kdims[0];
	long ky = conf->kdims[1];
	long kz = conf->kdims[2];

	cov_dims[0] = (1 == kx) ? 1 : (2 * kx);
	cov_dims[1] = (1 == ky) ? 1 : (2 * ky);
	cov_dims[2] = (1 == kz) ?
1 : (2 * kz); cov_dims[3] = channels * (channels + 1) / 2; } const struct ecalib_conf ecalib_defaults = { { 6, 6, 6 }, 0.001, -1, -1., false, false, 0.8, true, false, -1., false, true, -1., false}; void calib2(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data, const long msk_dims[3], const bool* msk) { long channels = calreg_dims[3]; long maps = out_dims[4]; assert(calreg_dims[3] == out_dims[3]); assert(maps <= channels); assert(1 == md_calc_size(DIMS - 5, out_dims + 5)); assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5)); complex float rot[channels][channels]; if (conf->rotphase) { // rotate the the phase with respect to the first principle component long scc_dims[DIMS] = MD_INIT_ARRAY(DIMS, 1); scc_dims[COIL_DIM] = channels; scc_dims[MAPS_DIM] = channels; scc(scc_dims, &rot[0][0], calreg_dims, data); } else { for (int i = 0; i < channels; i++) for (int j = 0; j < channels; j++) rot[i][j] = (i == j) ? 1. : 0.; } long cov_dims[4]; calone_dims(conf, cov_dims, channels); complex float* imgcov = md_alloc(4, cov_dims, CFL_SIZE); calone(conf, cov_dims, imgcov, SN, svals, calreg_dims, data); caltwo(conf, out_dims, out_data, eptr, cov_dims, imgcov, msk_dims, msk); /* Intensity and phase normalization similar as proposed * for adaptive combine (Walsh's method) in * Griswold et al., ISMRM 10:2410 (2002) */ if (conf->intensity) { debug_printf(DP_DEBUG1, "Normalize...\n"); /* I think the reason this works is because inhomogeneity usually * comes from only a few coil elements which are close. The l1-norm * is more resilient against such outliers. -- Martin */ normalizel1(DIMS, COIL_FLAG, out_dims, out_data); md_zsmul(DIMS, out_dims, out_data, out_data, sqrtf((float)channels)); } float c = (conf->crop >= 0.) ? conf->crop : sure_crop(conf->var, out_dims, out_data, eptr, calreg_dims, data); debug_printf(DP_DEBUG1, "Crop maps... 
(c = %.2f)\n", c); crop_sens(out_dims, out_data, conf->softcrop, c, eptr); debug_printf(DP_DEBUG1, "Fix phase...\n"); fixphase2(DIMS, out_dims, COIL_DIM, rot[0], out_data, out_data); md_free(imgcov); } void calib(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data) { calib2(conf, out_dims, out_data, eptr, SN, svals, calreg_dims, data, NULL, NULL); } static void perturb(const long dims[2], complex float* vecs, float amt) { complex float* noise = md_alloc(2, dims, CFL_SIZE); md_gaussian_rand(2, dims, noise); for (long j = 0; j < dims[1]; j++) { float nrm = md_znorm(1, dims, noise + j * dims[0]); complex float val = amt / nrm; md_zsmul(1, dims, noise + j * dims[0], noise + j * dims[0], val); } md_zadd(2, dims, vecs, vecs, noise); for (long j = 0; j < dims[1]; j++) { float nrm = md_znorm(1, dims, vecs + j * dims[0]); complex float val = 1 / nrm; md_zsmul(1, dims, vecs + j * dims[0], vecs + j * dims[0], val); } md_free(noise); } static int number_of_kernels(const struct ecalib_conf* conf, int N, const float val[N]) { int n = 0; if (-1 != conf->numsv) { n = conf->numsv; assert(-1. == conf->percentsv); assert(-1. == conf->threshold); } else if (conf->percentsv != -1.) { n = N * conf->percentsv / 100.; assert(-1 == conf->numsv); assert(-1. == conf->threshold); } else { assert(-1 == conf->numsv); assert(-1. == conf->percentsv); for (int i = 0; i < N; i++) if (val[i] / val[0] > sqrtf(conf->threshold)) n++; } if (val[0] <= 0.) error("No signal.\n"); debug_printf(DP_DEBUG1, "Using %d/%ld kernels (%.2f%%, last SV: %f%s).\n", n, N, (float)n / (float)N * 100., (n > 0) ? (val[n - 1] / val[0]) : 1., conf->weighting ? 
", weighted" : ""); float tr = 0.; for (int i = 0; i < N; i++) { tr += powf(val[i], 2.); debug_printf(DP_DEBUG3, "SVALS %f (%f)\n", val[i], val[i] / val[0]); } debug_printf(DP_DEBUG3, "\nTRACE: %f (%f)\n", tr, tr / (float)N); assert(n <= N); return n; } void compute_kernels(const struct ecalib_conf* conf, long nskerns_dims[5], complex float** nskerns_ptr, int SN, float val[SN], const long caldims[DIMS], const complex float* caldata) { assert(1 == md_calc_size(DIMS - 5, caldims + 5)); nskerns_dims[0] = conf->kdims[0]; nskerns_dims[1] = conf->kdims[1]; nskerns_dims[2] = conf->kdims[2]; nskerns_dims[3] = caldims[3]; long N = md_calc_size(4, nskerns_dims); assert(N > 0); nskerns_dims[4] = N; complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE); *nskerns_ptr = nskerns; PTR_ALLOC(complex float[N][N], vec); assert(NULL != val); assert(SN == N); debug_printf(DP_DEBUG1, "Build calibration matrix and SVD...\n"); #ifdef CALMAT_SVD calmat_svd(conf->kdims, N, *vec, val, caldims, caldata); if (conf->weighting) soft_weight_singular_vectors(N, conf->var, conf->kdims, caldims, val, val); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) #ifndef FLIP nskerns[i * N + j] = ((*vec)[j][i]) * (conf->weighting ? val[i] : 1.); #else nskerns[i * N + j] = ((*vec)[j][N - 1 - i]) * (conf->weighting ? val[N - 1 - i] : 1.); #endif #else covariance_function(conf->kdims, N, *vec, caldims, caldata); debug_printf(DP_DEBUG1, "Eigen decomposition... (size: %ld)\n", N); // we could apply Nystroem method here to speed it up float tmp_val[N]; lapack_eig(N, tmp_val, *vec); // reverse and square root, test for smaller null to avoid NaNs for (int i = 0; i < N; i++) val[i] = (tmp_val[N - 1 - i] < 0.) ? 0. : sqrtf(tmp_val[N - 1 - i]); if (conf->weighting) soft_weight_singular_vectors(N, conf-> var, conf->kdims, caldims, val, val); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) #ifndef FLIP nskerns[i * N + j] = (*vec)[N - 1 - i][j] * (conf->weighting ? 
val[i] : 1.); // flip #else nskerns[i * N + j] = (*vec)[i][j] * (conf->weighting ? val[N - 1 - i] : 1.); // flip #endif #endif if (conf->perturb > 0.) { long dims[2] = { N, N }; perturb(dims, nskerns, conf->perturb); } #ifndef FLIP nskerns_dims[4] = number_of_kernels(conf, N, val); #else nskerns_dims[4] = N - number_of_kernels(conf, N, val); #endif PTR_FREE(vec); } void compute_imgcov(const long cov_dims[4], complex float* imgcov, const long nskerns_dims[5], const complex float* nskerns) { debug_printf(DP_DEBUG1, "Zeropad...\n"); long xh = cov_dims[0]; long yh = cov_dims[1]; long zh = cov_dims[2]; long kx = nskerns_dims[0]; long ky = nskerns_dims[1]; long kz = nskerns_dims[2]; long channels = nskerns_dims[3]; long nr_kernels = nskerns_dims[4]; long imgkern_dims[5] = { xh, yh, zh, channels, nr_kernels }; complex float* imgkern1 = md_alloc(5, imgkern_dims, CFL_SIZE); complex float* imgkern2 = md_alloc(5, imgkern_dims, CFL_SIZE); md_resize_center(5, imgkern_dims, imgkern1, nskerns_dims, nskerns, CFL_SIZE); // resort array debug_printf(DP_DEBUG1, "FFT (juggling)...\n"); long istr[5]; long mstr[5]; long idim[5] = { xh, yh, zh, channels, nr_kernels }; long mdim[5] = { nr_kernels, channels, xh, yh, zh }; md_calc_strides(5, istr, idim, CFL_SIZE); md_calc_strides(5, mstr, mdim, CFL_SIZE); long m2str[5] = { mstr[2], mstr[3], mstr[4], mstr[1], mstr[0] }; ifftmod(5, imgkern_dims, FFT_FLAGS, imgkern1, imgkern1); ifft2(5, imgkern_dims, FFT_FLAGS, m2str, imgkern2, istr, imgkern1); float scalesq = (kx * ky * kz) * (xh * yh * zh); // second part for FFT scaling md_free(imgkern1); debug_printf(DP_DEBUG1, "Calculate Gram matrix...\n"); int cosize = channels * (channels + 1) / 2; assert(cov_dims[3] == cosize); #pragma omp parallel for collapse(3) for (int k = 0; k < zh; k++) { for (int j = 0; j < yh; j++) { for (int i = 0; i < xh; i++) { complex float gram[cosize]; gram_matrix2(channels, gram, nr_kernels, (const complex float (*)[nr_kernels])(imgkern2 + ((k * yh + j) * xh + i) * 
(channels * nr_kernels))); #ifdef FLIP // add (scaled) identity matrix for (int i = 0, l = 0; i < channels; i++) for (int j = 0; j <= i; j++, l++) gram[l] = ((i == j) ? (kx * ky * kz) : 0.) - gram[l]; #endif for (int l = 0; l < cosize; l++) imgcov[(((l * zh) + k) * yh + j) * xh + i] = gram[l] / scalesq; } } } md_free(imgkern2); }
dense_pairwise.c
/* Copyright (c) 2016, 2021 Drew Schmidt All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ // Functions for computing covariance, (pearson) correlation, and cosine similarity #include <math.h> #include <stdlib.h> #include <string.h> #include "utils/safeomp.h" #include "coop.h" #include "utils/fill.h" #include "utils/inverse.h" #include "utils/special_vals.h" static inline void compute_sums(const int m, const size_t mi, const double *const restrict vec, const double *const restrict x, double *restrict sumx, double *restrict sumy, int *restrict len) { int k; *sumx = 0; *sumy = 0; *len = 0; PLEASE_VECTORIZE for (k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) { *sumx += vec[k]; *sumy += x[k + mi]; (*len)++; } } } // ----------------------------------------------------------------------------- // cosine // ----------------------------------------------------------------------------- int coop_cosine_mat_inplace_pairwise(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cos) { int check; double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); for (int j=0; j<n; j++) { const size_t mj = (size_t)m*j; memcpy(vec, x+mj, m*sizeof(*vec)); const size_t nj = (size_t)n*j; #pragma omp parallel for shared(j, vec, cos) if(m*n > OMP_MIN_SIZE) for (int i=j; i<n; i++) { const size_t mi = (size_t)m*i; double xx, xy, yy; xx = xy = yy = 0.0; int len = 0; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) { const double xval = vec[k]; const double yval = x[k + mi]; xx += xval * xval; yy += yval * yval; xy += xval * yval; len++; } } if (len == 0) { set_na_real(cos + (i + nj)); continue; } cos[i + nj] = xy / sqrt(xx * yy); } } free(vec); if (inv) { check = inv_sym_chol(n, cos); CHECKRET(check); } symmetrize(n, cos); return COOP_OK; } // ----------------------------------------------------------------------------- // correlation // ----------------------------------------------------------------------------- int coop_pcor_mat_inplace_pairwise(const bool inv, const int m, const int n, const double * 
const restrict x, double *restrict cor) { int check; double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); for (int j=0; j<n; j++) { const size_t mj = (size_t)m*j; memcpy(vec, x+mj, m*sizeof(*vec)); const size_t nj = (size_t)n*j; #pragma omp parallel for shared(j, vec, cor) if(m*n > OMP_MIN_SIZE) for (int i=j; i<n; i++) { const size_t mi = (size_t)m*i; int len; double meanx, meany; compute_sums(m, mi, vec, x, &meanx, &meany, &len); if (len == 0 || len == 1) { set_na_real(cor + (i + nj)); set_na_real(cor + (j + (size_t)n*i)); continue; } const double dlen = (double) len; meanx /= dlen; meany /= dlen; double sdx = 0.; double sdy = 0.; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) { sdx += (vec[k] - meanx)*(vec[k] - meanx); sdy += (x[k + mi] - meany)*(x[k + mi] - meany); } } sdx = sqrt(sdx/(dlen-1.)); sdy = sqrt(sdy/(dlen-1.)); double mmcp = 0.0; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) mmcp += (vec[k] - meanx) * (x[k + mi] - meany); } cor[i + nj] = mmcp / sdx / sdy / (dlen - 1.0);; } } free(vec); if (inv) { check = inv_sym_chol(n, cor); CHECKRET(check); } symmetrize(n, cor); return COOP_OK; } // ----------------------------------------------------------------------------- // covariance // ----------------------------------------------------------------------------- int coop_covar_mat_inplace_pairwise(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cov) { int check; double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); for (int j=0; j<n; j++) { const size_t mj = (size_t)m*j; memcpy(vec, x+mj, m*sizeof(*vec)); const size_t nj = (size_t)n*j; #pragma omp parallel for shared(j, vec, cov) if(m*n > OMP_MIN_SIZE) for (int i=j; i<n; i++) { const size_t mi = (size_t)m*i; int len; double meanx, meany; compute_sums(m, mi, vec, x, &meanx, &meany, &len); if (len == 0) { set_na_real(cov + (i + nj)); set_na_real(cov + (j + (size_t)n*i)); continue; } meanx /= (double) 
len; meany /= (double) len; double mmcp = 0.0; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) mmcp += (vec[k] - meanx) * (x[k + mi] - meany); } cov[i + nj] = mmcp * ((double) 1.0/(len-1)); } } free(vec); if (inv) { check = inv_sym_chol(n, cov); CHECKRET(check); } symmetrize(n, cov); return COOP_OK; } int coop_covar_matmat_inplace_pairwise(const bool inv, const int m, const int nx, const double *const restrict x, const int ny, const double *const restrict y, double *restrict cov) { int check; double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); for (int j=0; j<ny; j++) { const size_t mj = (size_t)m*j; memcpy(vec, y+mj, m*sizeof(*vec)); #pragma omp parallel for shared(j, vec, cov) if(m*nx > OMP_MIN_SIZE) for (int i=0; i<nx; i++) { const size_t mi = (size_t)m*i; int len; double meanx, meany; compute_sums(m, mi, vec, x, &meanx, &meany, &len); if (len == 0) { set_na_real(cov + (i + nx*j)); set_na_real(cov + (j + nx*i)); continue; } meanx /= (double) len; meany /= (double) len; double mmcp = 0.0; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) mmcp += (vec[k] - meanx) * (x[k + mi] - meany); } cov[i + nx*j] = mmcp * ((double) 1.0/(len-1)); } } free(vec); if (nx == ny && inv) { check = inv_gen_lu(nx, cov); CHECKRET(check); } return COOP_OK; }
CALPHADFreeEnergyFunctionsBinary.h
#ifndef included_CALPHADFreeEnergyFunctionsBinary
#define included_CALPHADFreeEnergyFunctionsBinary

#include "CALPHADSpeciesPhaseGibbsEnergy.h"
#include "InterpolationType.h"
#include "Phases.h"
#include "datatypes.h"
#include "functions.h"

// NOTE(review): boost::optional is used below; presumably pulled in
// transitively via ptree.hpp -- confirm, or include boost/optional.hpp here.
#include <boost/property_tree/ptree.hpp>

#include <cassert>
#include <fstream>
#include <iostream>
#include <math.h>

namespace Thermo4PFM
{

// CALPHAD free-energy model for a binary alloy with two phases (liquid L and
// solid A).  Parameters (per-species Gibbs energies and Redlich-Kister mixing
// coefficients L0..L3) are read from a property tree; an optional second tree
// configures the Newton solver used for phase-concentration/equilibrium
// computations.
class CALPHADFreeEnergyFunctionsBinary
{
public:
    // input_db: CALPHAD parameters; newton_db: optional solver settings;
    // the two interpolation types select the phase-fraction interpolants.
    CALPHADFreeEnergyFunctionsBinary(boost::property_tree::ptree& input_db,
        boost::optional<boost::property_tree::ptree&> newton_db,
        const EnergyInterpolationType energy_interp_func_type,
        const ConcInterpolationType conc_interp_func_type);

    // releases the diagnostics filename buffer allocated by this class
    ~CALPHADFreeEnergyFunctionsBinary() { delete[] fenergy_diag_filename_; };

    // Free energy of phase pi at the given temperature and composition;
    // gp selects the grand-potential form.
    double computeFreeEnergy(const double temperature, const double* const conc,
        const PhaseIndex pi, const bool gp = false);
    // d(free energy)/d(conc) for phase pi, written into the last argument.
    void computeDerivFreeEnergy(const double temperature,
        const double* const conc, const PhaseIndex pi, double*);
    // d2(free energy)/d(conc)^2 for phase pi, written into d2fdc2.
    void computeSecondDerivativeFreeEnergy(const double temp,
        const double* const conc, const PhaseIndex pi, double* d2fdc2);

    // Solve for the equilibrium compositions ceq at the given temperature;
    // returns true on convergence within maxits Newton iterations.
    bool computeCeqT(const double temperature, double* ceq,
        const int maxits = 20, const bool verbose = false);

    // sanity checks over the temperature range [T0, T1]
    void preRunDiagnostics(const double T0 = 300., const double T1 = 3000.);

    // Compute the per-phase concentrations x consistent with the overall
    // composition conc and phase fraction phi; returns a solver status.
    int computePhaseConcentrations(const double temperature, const double* conc,
        const double* const phi, double* x);
    // Tabulate energy over a (phi, c) grid for plotting/diagnostics.
    void energyVsPhiAndC(const double temperature, const double* const ceq,
        const bool found_ceq, const double phi_well_scale,
        const int npts_phi = 51,
        const int npts_c = 50); // # of compositions to use (>1)
    void printEnergyVsComposition(
        const double temperature, std::ostream& os, const int npts = 100);
    // Interpolated chemical free energy for phase fractions phi at conc.
    double fchem(const double* const phi, const double* const conc,
        const double temperature);
    void printEnergyVsPhiHeader(const double temperature, const int nphi,
        const int nc, const double cmin, const double cmax,
        const double slopec, std::ostream& os) const;
    void printEnergyVsPhi(const double* const conc, const double temperature,
        const double phi_well_scale, const int npts, const double slopec,
        std::ostream& os);
    // Evaluate the T-dependent CALPHAD parameters: the four mixing
    // coefficients for each phase and the two species reference energies.
    void computeTdependentParameters(const double temperature,
        CalphadDataType* Lmix_L, CalphadDataType* Lmix_A, CalphadDataType* fA,
        CalphadDataType* fB);

private:
    EnergyInterpolationType energy_interp_func_type_;
    ConcInterpolationType conc_interp_func_type_;

    void readNewtonparameters(boost::property_tree::ptree& newton_db);

    // owned; freed in the destructor
    char* fenergy_diag_filename_;

    // Newton solver settings (filled from newton_db when present)
    double newton_tol_;
    double newton_alpha_;
    int newton_maxits_;
    bool newton_verbose_;

    // Single species energies in each phase
    // size 2 for species 0 and 1
    CALPHADSpeciesPhaseGibbsEnergy g_species_phaseL_[2];
    CALPHADSpeciesPhaseGibbsEnergy g_species_phaseA_[2];

    // size 4 for L0, L1, L2, L3,
    // can contain up to 3 coefficients a,b,c for a+b*T,
    // possibly +c*T*ln(T) if compiled with -DLMIX_WTLOGT
    CalphadDataType LmixPhaseL_[4][MAX_POL_T_INDEX];
    CalphadDataType LmixPhaseA_[4][MAX_POL_T_INDEX];

    // interpolation polynomials, indexed by the interpolation-type enums
    double (*fun_ptr_arr_[3])(const double){ linear_interp_func,
        pbg_interp_func, harmonic_interp_func };

    void readParameters(boost::property_tree::ptree& calphad_db);

#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
    // energy of species "is" in phase L,A
    double getFenergyPhaseL(const short is, const double temperature)
    {
        return g_species_phaseL_[is].fenergy(temperature);
    }
    double getFenergyPhaseA(const short is, const double temperature)
    {
        return g_species_phaseA_[is].fenergy(temperature);
    }

    // Mixing coefficient L<index> of phase pi evaluated at temperature:
    // a + b*T (+ c*T*ln(T) when built with -DLMIX_WTLOGT).
    // Returns NAN for an unknown phase.
    CalphadDataType lmixPhase(
        const unsigned index, const PhaseIndex pi, const double temperature)
    {
        // assert(index < 4);
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return LmixPhaseL_[index][0]
                       + LmixPhaseL_[index][1] * temperature
#ifdef LMIX_WTLOGT
                       + LmixPhaseL_[index][2] * temperature * log(temperature)
#endif
                    ;
            case PhaseIndex::phaseA:
                return LmixPhaseA_[index][0]
                       + LmixPhaseA_[index][1] * temperature
#ifdef LMIX_WTLOGT
                       + LmixPhaseA_[index][2] * temperature * log(temperature)
#endif
                    ;
            default:
                return NAN;
        }
    }
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif

    // Free energies of both phases at the interpolated phase fractions hphi
    // and composition conc, written into fl (liquid) and fa (solid A).
    void computePhasesFreeEnergies(const double temperature,
        const double* const hphi, const double conc, double& fl, double& fa);
};
}
#endif
test.c
#include <stdio.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; int fail = 0; INIT(); // ************************** // Series 1: no dist_schedule // ************************** // // Test: #iterations == #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 2: with dist_schedule // **************************** // // Test: #iterations == #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd 
dist_schedule(static,1) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd dist_schedule(static,512) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); int ten = 10; int chunkSize = 512/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,1) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd 
dist_schedule(static,500) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); ten = 10; chunkSize = 500/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,1) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,123) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); ten = 10; chunkSize = 123/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute 
simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 3: with ds attributes // **************************** // // Test: private // ZERO(A); ZERO(B); double p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) { #pragma omp distribute simd private(p,q) for(int i = 0 ; i < N ; i++) { p = 2; q = 3; A[i] += p; B[i] += q; } } } for(int i = 0 ; i < N ; i++) { if (A[i] != TRIALS*2) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]); fail = 1; } if (B[i] != TRIALS*3) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: firstprivate // ZERO(A); ZERO(B); p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation #pragma omp teams num_teams(64) { #pragma omp distribute simd firstprivate(p,q) for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team) q += 7.0; A[i] += p; B[i] += q; } } } for(int i = 0 ; i < 128 ; i++) { if (i % 2 == 0) { if (A[i] != (2.0+3.0)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]); fail = 1; } } else { if (A[i] != (2.0+3.0*2)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0*2)*TRIALS) { printf("Error at B[%d], h = %lf, d = 
%lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]); fail = 1; } } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: lastprivate // int lastpriv = -1; #pragma omp target map(tofrom:lastpriv) #pragma omp teams num_teams(10) #pragma omp distribute simd lastprivate(lastpriv) for(int i = 0 ; i < omp_get_num_teams() ; i++) lastpriv = omp_get_team_num(); if(lastpriv != 9) { printf("lastpriv value is %d and should have been %d\n", lastpriv, 9); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // ************************** // Series 4: collapse // ************************** // // Test: 2 loops // double * S = malloc(N*N*sizeof(double)); double * T = malloc(N*N*sizeof(double)); double * U = malloc(N*N*sizeof(double)); for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) { S[i*N+j] = 0.0; T[i*N+j] = 1.0; U[i*N+j] = 2.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N]) #pragma omp teams num_teams(512) #pragma omp distribute simd collapse(2) for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t } for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) if (S[i*N+j] != TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: 3 loops // int M = N/8; double * V = malloc(M*M*M*sizeof(double)); double * Z = malloc(M*M*M*sizeof(double)); for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) { V[i*M*M+j*M+k] = 2.0; Z[i*M*M+j*M+k] = 3.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M]) #pragma omp teams num_teams(512) #pragma omp distribute simd collapse(3) for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t } for (int i = 0 ; i < M ; 
i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); return 0; }
GB_unaryop__minv_int32_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int32_int8
// op(A') function:  GB_tran__minv_int32_int8

// C type:   int32_t
// A type:   int8_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 32)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 32) ;

// casting
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator element-wise over the anz entries of Ax, writing the
// int32_t results to Cx, parallelized statically over nthreads threads.
GrB_Info GB_unop__minv_int32_int8
(
    int32_t *Cx,        // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// which is specialized here via the GB_* macros defined above.
GrB_Info GB_tran__minv_int32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mpi.c
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

//CITS 5507
//Student ID: 21804416
//Name: SUN QIANG
//Student ID: 21658782
//Name: WANG YU

// Where MPI speeds things up:
//   1. sorting the columns and finding blocks are distributed over ranks;
//   2. the master gathers every rank's signatures and sorts them.
// Where OpenMP speeds things up:
//   1. sorting all columns (parallel for over columns);
//   2. generating blocks;
//   3. sorting all ~29M signatures with an OpenMP quicksort (a single
//      thread could not cope with that array);
//   4. scanning the sorted signatures for collisions in parallel.
// Known limitation: the last column generates almost all the signatures
// (29321366 of them vs 12510 for the other 499 columns), so the rank that
// owns it dominates the runtime while the others idle.  Left as-is so the
// program stays correct for any M*N matrix.

// Tunables: change N & M for another matrix size, dia for another
// collision tolerance, and Max if more blocks than that are generated.
#define N 500
#define M 4400
#define dia 0.000001
#define Max 30000000
#define core_number 12

#define MASTER 0
#define send_data_tag 2001
#define return_data_tag 2002

// data     : raw input values, data[column][row].
// key      : one key per row, from keys.txt.
// seq      : per-column permutation of row ids; sorting a column permutes
//            seq only -- data itself is never reordered.
// signs    : signatures of the blocks found by THIS rank.
// all      : master-side aggregate of every rank's signatures.
// location : row ids + column id encoded per signature (e.g.
//            434044402220999499 = rows 434,444,222,999 of column 499).
// indexs   : number of signatures stored so far.
// id       : permutation over the signatures (same role as seq).
//            NOTE: ={-1} initializes only id[0]; the rest are 0, and the
//            master rewrites id[0..indexs-1] before it is used.
static double data[N][M];
static long key[M];
static int seq[N][M];
static long signs[Max];
static long all[Max];
static long location[Max];
static int indexs;
static int id[Max]={-1};
int k=0;

void readData();
void readkey();
void generate();
void sort(int start, int end);
void SelectSort(int a[],int n,double b[]);
void BubbleSort(int a[],int n,double b[]);
void findblock(int begin, int last);
void findblockcol(int i,double x[]);
void Sortsign(int a[],int n, long b[]);
void BubbleSortsign(int a[],int n,long b[]);
void resulttxt();
void quicksort(long arr[], int low_index, int high_index);
int partition(long * a, int p, int r);

// Entry point: the MASTER rank gathers signature counts and signatures
// from every worker, sorts them and writes the collisions; every other
// rank sorts its share of columns, finds blocks and sends them home.
int main (void){
    int ierr, sender, part_index;
    int numtasks, taskid, len;
    char hostname[MPI_MAX_PROCESSOR_NAME];

    //initiate MPI
    MPI_Status status;
    MPI_Init(NULL,NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Get_processor_name(hostname, &len);

    //pre-process for the data: every rank loads the full input
    readData();
    readkey();
    generate();

    // Columns handled per worker rank (the master does no column work).
    // NOTE(review): divides by zero when launched with a single rank --
    // the program requires at least 2 MPI processes.
    int col_each_task = (int)(500 / (numtasks - 1)) + 1;

    if (taskid != MASTER) {
        // column range for this worker, clamped to the last column index
        int starts=(taskid - 1) * col_each_task;
        int ends = taskid * col_each_task;
        if (ends >499) {
            ends = 499;
        }
        printf ("MPI task %d has started... From %d col to %d \n", taskid,starts,ends);

        sort(starts,ends);
        findblock(starts,ends);
        printf("Number of blcoks found %d in node-%d \n",indexs, taskid);

        // ship the count first, then the signatures, tagged with our rank id
        ierr = MPI_Send( &indexs, 1, MPI_INT, MASTER, taskid, MPI_COMM_WORLD);
        if (ierr != MPI_SUCCESS)
            printf("%d: Send signature_number failure\n", taskid);
        ierr = MPI_Send(&signs[0],indexs,MPI_LONG,MASTER, taskid, MPI_COMM_WORLD);
        if (ierr != MPI_SUCCESS)
            printf("%d: Send signature failure\n", taskid);
    }
    else {
        // Receive every worker's count + signatures, appending into all[].
        // The loop variable doubles as the expected message tag, so both
        // messages of a given worker are matched by its rank id.
        for(taskid = 1; taskid < numtasks;taskid++) {
            ierr = MPI_Recv( &part_index, 1, MPI_INT, MPI_ANY_SOURCE,taskid, MPI_COMM_WORLD, &status);
            if (ierr != MPI_SUCCESS)
                printf("%d: Receive signatures failure\n", taskid);
            sender = status.MPI_SOURCE;
            ierr = MPI_Recv(&all[indexs], part_index,MPI_LONG,MPI_ANY_SOURCE,taskid,MPI_COMM_WORLD,&status);
            if (ierr != MPI_SUCCESS)
                printf("%d: Receive signatures failure\n", taskid);
            printf("Partial number of blocks %i returned from process %i\n", part_index, sender);
            indexs += part_index;
        }
        printf("All number of blocks %d \n",indexs);

        // identity permutation over the gathered signatures
        int i;
        for(i=0;i<indexs;i++){
            id[i]=i;
        }
        // OpenMP-parallel quicksort of the signature values
        quicksort(all,0,indexs-1);
        // find the collisions and print them to results.txt
        resulttxt();
    }
    MPI_Finalize();
    return 0;
}

// Append the collisions found in the sorted all[] array to results.txt,
// one "(signature, signature)" line per colliding entry, and report the
// collision count on stdout.
void resulttxt(){
    int indexsnon=0;   // signatures with no collision
    int col=0;         // colliding signatures written out

    FILE *fp = fopen("results.txt","a");
    if (fp == NULL) {
        printf("Cannot open results.txt\n");
        return;
    }

    int i;
    for(i=0;i<indexs;i++){
        // A signature collides when it equals a neighbour in sorted order.
        // FIX: the old test read id[i+1] at i==indexs-1 and id[i-1] at
        // i==0, both out of bounds (undefined behaviour); guard both.
        int dup = 0;
        if (i + 1 < indexs && all[id[i]] == all[id[i+1]])
            dup = 1;
        if (i > 0 && all[id[i]] == all[id[i-1]])
            dup = 1;

        if (!dup) {
            indexsnon=indexsnon+1;
        }
        else{
            // NOTE(review): this writes the signature twice; the header
            // comment says the second column should be the block id
            // (location) -- confirm the intended output format.
            fprintf(fp,"%ld %ld\n",all[id[i]],all[id[i]]);
            col=col+1;
        }
    }
    fclose(fp);
    printf("The number of collsions found: %d\n",col);
}

// Read the keys (space separated, one line expected) from keys.txt into
// key[].  NOTE(review): no bounds check -- a file with more than M values
// would overrun key[].
void readkey() {
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;
    int line_index = 0;
    int column_index = 0;

    fp = fopen("keys.txt","r");
    if (fp ==NULL)
        exit(EXIT_FAILURE);

    while((read = getline(&line,&len,fp))!=-1) {
        char *pch;
        pch=strtok(line," ");
        while (pch!=NULL) {
            sscanf(pch,"%ld",&key[column_index]);
            column_index +=1;
            pch = strtok(NULL," ");
        }
        line_index+=1;
    }
    fclose(fp);
    if(line)
        free(line);
}

// Read the value matrix (comma/space separated) from data.txt, storing
// token c of line r into data[c][r].  NOTE(review): no bounds check -- a
// file wider than N or taller than M would overrun data[][].
void readData() {
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;
    int line_index = 0;
    int column_index;

    fp = fopen("data.txt","r");
    if (fp ==NULL)
        exit(EXIT_FAILURE);

    while((read = getline(&line,&len,fp))!=-1) {
        char *pch;
        column_index=0;
        pch=strtok(line," ,");
        while (pch!=NULL) {
            sscanf(pch,"%lf",&data[column_index][line_index]);
            column_index +=1;
            pch = strtok(NULL," ,");
        }
        line_index+=1;
    }
    fclose(fp);
    if(line)
        free(line);
}

// Fill seq with the identity permutation 0..M-1 for every column.
void generate(){
    int i,j;
    for(i = 0;i<N;i++) {
        for (j =0;j<M;j++) {
            seq[i][j]=j;
        }
    }
}

// Sort (the permutations of) columns [start, end) in parallel.
// NOTE(review): the loop is half-open (i < end) while callers clamp end
// to 499, so column 499 itself is never sorted here -- confirm whether
// findblock() compensates or this is an off-by-one.
void sort(int start,int end) {
    int i;
    if (end > 499){
        end = 499;
    }
    #pragma omp parallel for
    for(i = start;i < end;i++){
        // We can use different methods to sort here
        SelectSort(seq[i],M,data[i]);
        // BubbleSort(seq[i],M,data[i]);
    }
}

// Selection sort of the index array a[0..n) ordered by the values b[a[i]];
// only the indices move, b is untouched.
void SelectSort(int a[],int n, double b[]) {
    int i,j;
    for(i=0;i<n-1;i++) {
        int k=i;
        for(j=i+1;j<n;j++)
            if(b[a[k]]>b[a[j]])
                k=j;
        if(k!=i) {
            int temp=a[i];
            a[i]=a[k];
            a[k]=temp;
        }
    }
}

// Selection sort of the index array a[0..n) ordered by the long-valued
// signatures b[a[i]] (single-threaded alternative to quicksort()).
void Sortsign(int a[],int n, long b[]) {
    int i,j;
    for(i=0;i<n-1;i++) {
        int k=i;
        for(j=i+1;j<n;j++)
            if(b[a[k]]>b[a[j]])
                k=j;
        if(k!=i) {
            int temp=a[i];
            a[i]=a[k];
            a[k]=temp;
        }
    }
}

//Bubble sort for
signtures void BubbleSortsign(int a[],int n,long b[]) { int i,j; for(i=n-1;i>0;--i){ // printf("%d %ld\n", i,b[i]); for(j=0;j<i;j++){ if(b[a[j]]>b[a[j+1]]) { int temp=a[j]; a[j]=a[j+1]; a[j+1]=temp; } } } printf("ok\n"); } //Find all blocks void findblock(int begin, int last){ ///////Here to change int i,j; //Use the openmp here to find blocks #pragma omp parallel for //Change i<N-1 to calculate the outcome without last column. i<N to calculate the outcome with last column. for(i=begin;i<last;i++){ findblockcol(i,data[i]); } } void findblockcol(int i,double x[]){ int nums=0; int a,c,d; long b; long sign; long uni; // FILE *fp; // fp=fopen("blocks.txt","a"); #pragma omp parallel for for(a=0;a<M;a++){ b=a+1; while(x[seq[i][b]]-x[seq[i][a]]<dia && b <M && x[seq[i][a]]!=0 && x[seq[i][b]]!=0) { c=b+1; while(x[seq[i][c]]-x[seq[i][a]]<dia && c<M && x[seq[i][c]]!=0){ d = c+1; while(x[seq[i][d]]-x[seq[i][a]]<dia && d<M && x[seq[i][d]]!=0) { indexs=indexs+1; sign=key[a]+key[b]+key[c]+key[d]; signs[indexs-1]=sign; uni=(a*1000000000000+b*100000000+c*10000+d)*1000+i; location[indexs-1]=uni; // fprintf(fp,"%ld %ld \n",sign,uni); d = d+1; nums=nums+1; } c=c+1; } b=b+1; } } // fclose(fp); // printf("Number found for %d column: %d\n",i+1,nums); } int partition(long * a, int p, int r) { int lt[r-p]; int gt[r-p]; int i; int j; int key = a[r]; int lt_n = 0; int gt_n = 0; #pragma omp parallel for for(i = p; i < r; i++){ if(a[i] < a[r]){ lt[lt_n++] = a[i]; }else{ gt[gt_n++] = a[i]; } } for(i = 0; i < lt_n; i++){ a[p + i] = lt[i]; } a[p + lt_n] = key; for(j = 0; j < gt_n; j++){ a[p + lt_n + j + 1] = gt[j]; } return p + lt_n; } void quicksort(long * a, int p, int r) { int div; if(p < r){ div = partition(a, p, r); #pragma omp parallel sections { #pragma omp section quicksort(a, p, div - 1); #pragma omp section quicksort(a, div + 1, r); } } }
kvstore_dist_server.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file mxnet_node.h
 * \brief implement mxnet nodes
 */
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_

#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"

namespace mxnet {
namespace kvstore {

// maintain same order in frontend.
enum class CommandType {
  kController, kSetMultiPrecision, kStopServer, kSyncMode,
  kSetGradientCompression, kSetProfilerParams
};

enum class RequestType {
  kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
};

// Decoded form of a data-request command: what kind of push/pull it is,
// plus the dtype of the payload.
struct DataHandleType {
  RequestType requestType;
  int dtype;
};

/*!
 * Uses Cantor pairing function to generate a unique number given two numbers.
 * This number can also be inverted to find the unique pair whose Cantor value is this number.
 * Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
 * \param requestType RequestType
 * \param dtype integer
 * \return Cantor value of arguments
 */
static int GetCommandType(RequestType requestType, int d) {
  int m = static_cast<int>(requestType);
  return (((m + d) * (m + d + 1)) / 2) + d;
}

/*!
 * Unpairs Cantor value and finds the two integers used to pair.
 * Then returns DataHandleType object with those numbers.
 * \param cmd DataHandleCommand generated by GetCommandType function
 * \return DataHandleType
 */
static DataHandleType DepairDataHandleType(int cmd) {
  int w = std::floor((std::sqrt(8 * cmd + 1) - 1) / 2);
  int t = ((w * w) + w) / 2;
  int y = cmd - t;
  int x = w - y;
  CHECK_GE(x, 0);
  CHECK_GE(y, 0);
  DataHandleType type;
  type.requestType = static_cast<RequestType>(x);
  type.dtype = y;
  return type;
}

/**
 * \brief executor runs a function using the thread called \ref Start
 */
class Executor {
 public:
  /**
   * \brief start the executor
   *
   * Runs the event loop on the calling thread: blocks until a Block is
   * queued, runs it, fulfils its promise, and exits when a Block with an
   * empty function (queued by \ref Stop) is dequeued.
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    while (true) {
      cond_.wait(lk, [this] { return !queue_.empty(); });
      Block blk = std::move(queue_.front());
      queue_.pop();
      lk.unlock();
      if (blk.f) {
        blk.f();
        blk.p->set_value();
      } else {
        // empty function == stop sentinel; release the waiter, then exit
        blk.p->set_value();
        break;
      }
      lk.lock();
    }
  }

  /**
   * \brief function
   */
  typedef std::function<void()> Func;

  /**
   * \brief let the thread called \ref Start to exec a function. threadsafe
   *
   * Blocks the caller until the executor thread has finished running \a func.
   */
  void Exec(const Func& func) {
    Block blk(func);
    auto fut = blk.p->get_future();
    {
      std::lock_guard<std::mutex> lk(mu_);
      queue_.push(std::move(blk));
      cond_.notify_one();
    }
    fut.wait();
  }

  /**
   * \brief stop the thread, threadsafe
   */
  void Stop() {
    Exec(Func());
  }

 private:
  // A queued work item: the function to run and the promise fulfilled once
  // it has run (or once the stop sentinel is processed).
  struct Block {
    explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) {}
    Func f;
    std::shared_ptr<std::promise<void>> p;
  };
  std::queue<Block> queue_;
  std::mutex mu_;
  std::condition_variable cond_;
};

class KVStoreDistServer {
 public:
  KVStoreDistServer() {
    using namespace std::placeholders;
    ps_server_ = new ps::KVServer<char>(0);
    // simple (command) requests and data (push/pull) requests take
    // different handlers
    static_cast<ps::SimpleApp*>(ps_server_)
        ->set_request_handle(std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
    ps_server_->set_request_handle(
        std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
    sync_mode_ = false;
    gradient_compression_ = std::make_shared<GradientCompression>();
    log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
  }

  ~KVStoreDistServer() {
    profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
    delete ps_server_;
  }

  void set_controller(const KVStore::Controller& controller) {
    CHECK(controller);
    controller_ = controller;
  }

  void set_updater(const KVStore::Updater& updater)  {
    CHECK(updater);
    updater_ = updater;
  }

  /**
   * \brief blocked until received the command \a kSyncMode
   */
  void Run() {
    exec_.Start();
  }

 private:
  struct UpdateBuf {
    std::vector<ps::KVMeta> request;
    NDArray merged;
    // temp_array is used to cast received values as float32 for computation if required
    NDArray temp_array;
  };

  // Dispatch a control command received from the frontend.
  void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
    CommandType recved_type = static_cast<CommandType>(recved.head);
    switch (recved_type) {
      case CommandType::kStopServer:
        exec_.Stop();
        break;
      case CommandType::kSyncMode:
        sync_mode_ = true;
        break;
      case CommandType::kSetGradientCompression:
        gradient_compression_->DecodeParams(recved.body);
        break;
      case CommandType::kSetProfilerParams:
        // last char is the type of profiler command
        ProcessServerProfilerCommands(
            static_cast<KVStoreServerProfilerCommand>(recved.body.back() - '0'),
            recved.body);
        break;
      case CommandType::kSetMultiPrecision:
        // uses value 1 for message id from frontend
        if (!multi_precision_) {
          multi_precision_ = true;
          CreateMultiPrecisionCopies();
        }
        break;
      case CommandType::kController:
        // this uses value 0 for message id from frontend
        // let the main thread to execute ctrl, which is necessary for python
        exec_.Exec([this, recved]() {
          CHECK(controller_);
          controller_(recved.head, recved.body);
        });
        break;
    }
    app->Response(recved);
  }

  /*
   * For keys already initialized, if necessary create stored_realt.
   * This will only be used if by some wrong usage of kvstore,
   * some keys are initialized before optimizer is set.
   */
  void CreateMultiPrecisionCopies() {
    for (auto const& stored_entry : store_) {
      const int key = stored_entry.first;
      const NDArray& stored = stored_entry.second;
      if (stored.dtype() != mshadow::kFloat32) {
        // create a float32 mirror matching the storage type of the original
        auto& stored_realt = store_realt_[key];
        if (stored.storage_type() == kRowSparseStorage) {
          stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
                                 true, mshadow::kFloat32);
        } else {
          stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
        }

        auto& update = update_buf_[key];
        if (!update.merged.is_none()) {
          if (update.merged.storage_type() == kRowSparseStorage) {
            update.merged = NDArray(kRowSparseStorage, update.merged.shape(),
                                    update.merged.ctx(), true, mshadow::kFloat32);
          } else {
            update.merged = NDArray(update.merged.shape(), update.merged.ctx(),
                                    false, mshadow::kFloat32);
          }
        }
        CHECK(update.request.size() == 0)
          << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway."
          << "Please set optimizer before pushing keys." << key << " " << update.request.size();

        CopyFromTo(stored, stored_realt);
      }
    }
    // block until every float32 copy is materialized
    for (auto const& stored_realt_entry : store_realt_) {
      stored_realt_entry.second.WaitToRead();
    }
  }

  // Dispatch one of the profiler sub-commands encoded in the message body.
  void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type,
                                     const std::string& body) {
    switch (type) {
      case KVStoreServerProfilerCommand::kSetConfig:
        SetProfilerConfig(body.substr(0, body.size() - 1));
        break;
      case KVStoreServerProfilerCommand::kState:
        MXSetProfilerState(static_cast<int>(body.front() - '0'));
        break;
      case KVStoreServerProfilerCommand::kPause:
        MXProfilePause(static_cast<int>(body.front() - '0'));
        break;
      case KVStoreServerProfilerCommand::kDump:
        MXDumpProfile(static_cast<int>(body.front() - '0'));
        break;
    }
  }

  // Parse "key:value,key:value,..." profiler parameters sent by the worker
  // and forward them to MXSetProfilerConfig. The output filename is prefixed
  // with this server's rank so ranks do not clobber each other's dumps.
  void SetProfilerConfig(std::string params_str) {
    std::vector<std::string> elems;
    mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
    std::vector<const char*> ckeys;
    std::vector<const char*> cvals;
    ckeys.reserve(elems.size());
    cvals.reserve(elems.size());

    for (size_t i = 0; i < elems.size(); i++) {
      std::vector<std::string> parts;
      mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
      CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
      CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
      CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "
                               << parts[0];
      if (parts[0] == "filename") {
        parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
      }
      char* ckey = new char[parts[0].length() + 1];
      std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str());
      ckeys.push_back(ckey);

      char* cval = new char[parts[1].length() + 1];
      std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str());
      cvals.push_back(cval);
    }
    MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]);
    for (size_t i = 0; i < ckeys.size(); i++) {
      delete[] ckeys[i];
      delete[] cvals[i];
    }
  }

  // Decode the request type from the Cantor-paired command and dispatch to
  // the matching push/pull handler.
  void DataHandleEx(const ps::KVMeta& req_meta,
                    const ps::KVPairs<char>& req_data,
                    ps::KVServer<char>* server) {
    DataHandleType type = DepairDataHandleType(req_meta.cmd);
    switch (type.requestType) {
      case RequestType::kRowSparsePushPull:
        DataHandleRowSparse(type, req_meta, req_data, server);
        break;
      case RequestType::kCompressedPushPull:
        DataHandleCompressed(type, req_meta, req_data, server);
        break;
      case RequestType::kDefaultPushPull:
        DataHandleDefault(type, req_meta, req_data, server);
        break;
    }
  }

  // True when the server keeps a separate float32 copy for this request's key.
  inline bool has_multi_precision_copy(const DataHandleType type) {
    return multi_precision_ && type.dtype != mshadow::kFloat32;
  }

  // Apply the accumulated update for `key` once all expected pushes have
  // arrived (sync mode) or immediately (async mode), then answer the
  // buffered requests.
  inline void ApplyUpdates(const DataHandleType type, const int key,
                           const ps::KVPairs<char>& req_data,
                           UpdateBuf* update_buf, ps::KVServer<char>* server) {
    if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
      // let the main thread to execute updater_, which is necessary for python
      auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
      auto& update =  sync_mode_ ? update_buf->merged : update_buf->temp_array;
      if (updater_) {
        exec_.Exec([this, key, &update, &stored](){
          CHECK(updater_);
          updater_(key, update, &stored);
        });
      } else {
        CHECK(sync_mode_) << "Updater needs to be set for async mode";
        // if no updater, just copy
        CopyFromTo(update_buf->merged, &stored);
      }

      if (log_verbose_)  {
        LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
      }
      /**
       * Request can be for either push, pull or pushpull
       * If pull flag is set, respond immediately with the updated values
       * Otherwise, only send the notification
       */
      bool has_pull = false;
      for (const auto& req : update_buf->request) {
        has_pull = has_pull || req.pull;
      }
      if (has_pull) {
        // if there is a pull request, perform WaitToRead() once before DefaultStorageResponse
        if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
        stored.WaitToRead();
        for (const auto& req : update_buf->request) {
          if (req.pull) {
            DefaultStorageResponse(type, key, req, req_data, server);
          }
        }
        update_buf->request.clear();
      } else {
        // otherwise, send response directly
        for (const auto& req : update_buf->request) {
          server->Response(req);
        }
        update_buf->request.clear();
        if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
        stored.WaitToRead();
      }
    } else {
      update_buf->merged.WaitToRead();
    }
  }

  // Recover the row ids of a row_sparse push/pull from the request keys
  // (keys[0] is the master key; keys[1..num_rows] encode master_key + row).
  void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
                    const int64_t master_key, const int64_t num_rows) {
    indices[0] = 0;
    for (int64_t i = 1; i <= num_rows; i++) {
      int key = DecodeKey(keys[i]);
      auto row_id = key - master_key;
      indices[i - 1] = row_id;
    }
  }

  // Accumulate a received row_sparse gradient into updateBuf->merged
  // (casting to float32 first when multi-precision is active).
  void AccumulateRowSparseGrads(const DataHandleType type,
                                const NDArray& recved,
                                UpdateBuf* updateBuf) {
    NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
                has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
    if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array);
    const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved;
    // accumulate row_sparse gradients
    using namespace mshadow;
    Engine::Get()->PushAsync(
      [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
        op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
          {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
        on_complete();
      }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
      FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
    CopyFromTo(out, &(updateBuf->merged), 0);
    updateBuf->merged.WaitToRead();
  }

  // Answer a row_sparse pull: copy only the requested rows of the stored
  // array into the response payload.
  void RowSparsePullResponse(const DataHandleType type,
                             const int master_key,
                             const size_t num_rows,
                             const ps::KVMeta& req_meta,
                             const ps::KVPairs<char>& req_data,
                             ps::KVServer<char>* server) {
    if (log_verbose_) LOG(INFO) << "pull: " << master_key;
    ps::KVPairs<char> response;
    if (num_rows == 0) {
      std::vector<int> lens(req_data.keys.size(), 0);
      response.keys = req_data.keys;
      response.lens.CopyFrom(lens.begin(), lens.end());
      server->Response(req_meta, response);
      return;
    }
    const NDArray& stored = store_[master_key];
    if (has_multi_precision_copy(type)) stored.WaitToRead();
    CHECK(!stored.is_none()) << "init " << master_key << " first";
    auto shape = stored.shape();
    auto unit_len = shape.ProdShape(1, shape.ndim());
    const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
    const int unit_size = unit_len * num_bytes;
    const char* data = static_cast<char*> (stored.data().dptr_);
    auto len = num_rows * unit_size;
    // concat values
    response.vals.resize(len);
    // NOTE(review): unsigned loop variable under `omp parallel for` requires
    // OpenMP 3.0+ support from the compiler.
    #pragma omp parallel for
    for (size_t i = 1; i <= num_rows; i++) {
      int key = DecodeKey(req_data.keys[i]);
      int64_t row_id = key - master_key;
      const auto src = data + row_id * unit_size;
      auto begin = (i - 1) * unit_size;
      auto end = i * unit_size;
      response.vals.segment(begin, end).CopyFrom(src, unit_size);
    }
    // setup response
    response.keys = req_data.keys;
    std::vector<int> lens(req_data.keys.size(), unit_len);
    lens[0] = 0;
    response.lens.CopyFrom(lens.begin(), lens.end());
    server->Response(req_meta, response);
  }

  // First push for a row_sparse key: allocate the stored array (and its
  // float32 mirror under multi-precision) and fill it with the received rows.
  void InitRowSparseStored(const DataHandleType type,
                           const int master_key,
                           const size_t num_rows,
                           const ps::KVMeta& req_meta,
                           const ps::KVPairs<char>& req_data,
                           ps::KVServer<char>* server) {
    auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key];
    int dtype = type.dtype;
    int num_bytes = mshadow::mshadow_sizeof(dtype);
    auto unit_len = req_data.lens[1] / num_bytes;
    CHECK_GT(unit_len, 0);
    size_t ds[] = {num_rows, (size_t) unit_len};
    mxnet::TShape dshape(ds, ds + 2);
    CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes);
    TBlob recv_blob;
    MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
      recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
    })
    NDArray recved = NDArray(recv_blob, 0);
    stored = NDArray(kRowSparseStorage, dshape, Context(), true,
                     has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
    if (has_multi_precision_copy(type)) {
      store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype);
    }
    Engine::Get()->PushAsync(
      [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) {
        NDArray rsp = stored;
        stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
        mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
        using namespace mxnet::op;
        nnvm::dim_t nnr = rsp.shape()[0];
        MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
          IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
          mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
        });
        TBlob rsp_data = rsp.data();
        // copies or casts as appropriate
        ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext());
        on_complete();
      }, recved.ctx(), {recved.var()}, {stored.var()},
      FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
    if (has_multi_precision_copy(type)) {
      CopyFromTo(stored, store_[master_key]);
      store_[master_key].WaitToRead();
    }
    stored.WaitToRead();
    server->Response(req_meta);
  }

  // Handle a row_sparse push (init, aggregate, apply) or pull request.
  void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
                           const ps::KVPairs<char>& req_data,
                           ps::KVServer<char>* server) {
    int master_key = DecodeKey(req_data.keys[0]);
    auto num_rows = req_data.keys.size() - 1;
    auto& stored = store_[master_key];
    if (req_meta.push) {
      CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
      CHECK_EQ(req_data.lens[0], 0);
      if (stored.is_none()) {
        if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
        // initialization
        CHECK_GT(num_rows, 0) << "init with empty data is not supported";
        InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
        return;
      } else {
        if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys;
        auto& updates = update_buf_[master_key];
        if (sync_mode_ && updates.merged.is_none()) {
          updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true,
                                   has_multi_precision_copy(type) ? mshadow::kFloat32
                                                                  : type.dtype);
        }
        if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
          updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(),
                                       false, mshadow::kFloat32);
        }

        if (num_rows == 0) {
          if (sync_mode_) {
            if (updates.request.empty()) {
              // reset to zeros
              int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32
                                                                : type.dtype;
              updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(),
                                       true, merged_dtype);
            }  // else nothing to aggregate
            updates.request.push_back(req_meta);
            ApplyUpdates(type, master_key, req_data, &updates, server);
          } else {
            server->Response(req_meta);
          }
        } else {
          auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
          CHECK_GT(unit_len, 0);
          // indices
          std::vector<int64_t> indices(num_rows);
          DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);

          // data
          TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
          size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
          mxnet::TShape dshape(ds, ds + 2);
          TBlob recv_blob;
          MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
            recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()),
                              dshape, cpu::kDevMask);
          })
          // row_sparse NDArray
          NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);

          if (updates.request.empty()) {
            if (sync_mode_) {
              CopyFromTo(recved, updates.merged);
            } else {
              if (has_multi_precision_copy(type)) {
                CopyFromTo(recved, updates.temp_array);
              } else {
                updates.temp_array = recved;
              }
            }
          } else {
            CHECK(sync_mode_);
            AccumulateRowSparseGrads(type, recved, &updates);
          }
          updates.request.push_back(req_meta);
          ApplyUpdates(type, master_key, req_data, &updates, server);
        }
      }
    } else {
      // pull
      RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
    }
  }

  // Answer a dense pull with the full stored value for `key`.
  void DefaultStorageResponse(const DataHandleType type,
                              const int key,
                              const ps::KVMeta& req_meta,
                              const ps::KVPairs<char>& req_data,
                              ps::KVServer<char>* server) {
    ps::KVPairs<char> response;
    const NDArray& stored = store_[key];
    CHECK(!stored.is_none()) << "init " << key << " first";

    // as server returns when store_realt is ready in this case
    if (has_multi_precision_copy(type)) stored.WaitToRead();

    auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
    response.keys = req_data.keys;
    response.lens = {len};
    // TODO(mli) try to remove this CopyFrom
    response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
    server->Response(req_meta, response);
  }

  // Handle pushes/pulls of gradient-compressed data (fp32 only): dequantize
  // into the store or the merge buffer before applying updates.
  void DataHandleCompressed(const DataHandleType type,
                            const ps::KVMeta& req_meta,
                            const ps::KVPairs<char> &req_data,
                            ps::KVServer<char>* server) {
    CHECK_EQ(type.dtype, mshadow::kFloat32)
      << "Gradient compression is currently supported for fp32 only";
    if (req_meta.push) {
      // there used several WaitToRead, this is because \a recved's memory
      // could be deallocated when this function returns. so we need to make sure
      // the operators with \a NDArray are actually finished

      // first for dummy key which represents original size of array, whose len is 0
      CHECK_EQ(req_data.keys.size(), (size_t)2);
      CHECK_EQ(req_data.lens.size(), (size_t)2);
      CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);

      int original_size = DecodeKey(req_data.keys[0]);
      int key = DecodeKey(req_data.keys[1]);
      auto& stored = store_[key];

      size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
      mxnet::TShape dshape(ds, ds + 1);
      TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask);
      NDArray recved = NDArray(recv_blob, 0);

      NDArray decomp_buf = decomp_buf_[key];
      dshape = mxnet::TShape{(int64_t) original_size};

      if (decomp_buf.is_none()) {
        decomp_buf = NDArray(dshape, Context());
      }

      if (stored.is_none()) {
        stored = NDArray(dshape, Context());
        gradient_compression_->Dequantize(recved, &stored, 0);
        server->Response(req_meta);
        stored.WaitToRead();
      } else if (sync_mode_) {
        // synced push
        auto& merged = update_buf_[key];
        if (merged.merged.is_none()) {
          merged.merged = NDArray(dshape, Context());
        }
        if (merged.request.size() == 0) {
          gradient_compression_->Dequantize(recved, &merged.merged, 0);
        } else {
          gradient_compression_->Dequantize(recved, &decomp_buf, 0);
          merged.merged += decomp_buf;
        }
        merged.request.push_back(req_meta);
        ApplyUpdates(type, key, req_data, &merged, server);
      } else {
        // async push
        gradient_compression_->Dequantize(recved, &decomp_buf, 0);
        exec_.Exec([this, key, &decomp_buf, &stored]() {
          CHECK(updater_);
          updater_(key, decomp_buf, &stored);
        });
        server->Response(req_meta);
        stored.WaitToRead();
      }
    } else {
      // pull
      CHECK_EQ(req_data.keys.size(), (size_t)1);
      CHECK_EQ(req_data.lens.size(), (size_t)0);
      int key = DecodeKey(req_data.keys[0]);
      DefaultStorageResponse(type, key, req_meta, req_data, server);
    }
  }

  // Handle dense pushes (init, aggregate, apply) and dense pulls.
  void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
                         const ps::KVPairs<char> &req_data,
                         ps::KVServer<char>* server) {
    // do some check
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    if (req_meta.push) {
      CHECK_EQ(req_data.lens.size(), (size_t)1);
      CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
    }
    int key = DecodeKey(req_data.keys[0]);
    auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
    // there used several WaitToRead, this is because \a recved's memory
    // could be deallocated when this function returns. so we need to make sure
    // the operators with \a NDArray are actually finished
    if (req_meta.push) {
      size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
      mxnet::TShape dshape(ds, ds + 1);
      TBlob recv_blob;
      MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
        recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
      })
      NDArray recved = NDArray(recv_blob, 0);
      if (stored.is_none()) {
        // initialization
        stored = NDArray(dshape, Context(), false,
                         has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
        CopyFromTo(recved, &stored, 0);
        server->Response(req_meta);
        if (has_multi_precision_copy(type)) {
          auto& stored_dtype = store_[key];
          stored_dtype = NDArray(dshape, Context(), false, type.dtype);
          CopyFromTo(stored, stored_dtype);
          stored_dtype.WaitToRead();
        }
        stored.WaitToRead();
      } else {
        auto& updates = update_buf_[key];
        if (sync_mode_ && updates.merged.is_none()) {
          updates.merged = NDArray(dshape, Context(), false,
                                   has_multi_precision_copy(type) ? mshadow::kFloat32
                                                                  : type.dtype);
        }
        if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
          updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
        }
        if (updates.request.empty()) {
          if (sync_mode_) {
            CopyFromTo(recved, updates.merged);
          } else {
            if (has_multi_precision_copy(type)) {
              CopyFromTo(recved, updates.temp_array);
            } else {
              updates.temp_array = recved;
            }
          }
        } else {
          CHECK(sync_mode_);
          if (has_multi_precision_copy(type)) {
            CopyFromTo(recved, updates.temp_array);
            updates.merged += updates.temp_array;
          } else {
            updates.merged += recved;
          }
        }
        updates.request.push_back(req_meta);
        ApplyUpdates(type, key, req_data, &updates, server);
      }
    } else {
      DefaultStorageResponse(type, key, req_meta, req_data, server);
    }
  }

  // Translate a global ps key into this server's local key space.
  int DecodeKey(ps::Key key) {
    auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
    return key - kr.begin();
  }

  /**
   * \brief user defined mode for push
   */
  bool sync_mode_;
  KVStore::Controller controller_;
  KVStore::Updater updater_;

  /**
   * \brief store_ contains the value at kvstore for each key
   */
  std::unordered_map<int, NDArray> store_;
  // float32 mirrors of non-fp32 keys, used in multi-precision mode
  std::unordered_map<int, NDArray> store_realt_;

  /**
   * \brief merge_buf_ is a buffer used if sync_mode is true. It represents
   * values from different workers being merged. The store will be updated
   * to this value when values from all workers are pushed into this buffer.
   */
  std::unordered_map<int, UpdateBuf> update_buf_;

  /**
   * \brief decomp_buf_ is a buffer into which compressed values are
   * decompressed before merging to the store. used when compress_!='none'
   */
  std::unordered_map<int, NDArray> decomp_buf_;

  Executor exec_;
  ps::KVServer<char>* ps_server_;

  // whether to LOG verbose information
  bool log_verbose_;

  /*
   * \brief whether to use multi precision mode.
   * in multi precision mode, all weights are stored as float32.
   * any gradient received will be cast to float32 before accumulation and updating of weights.
   */
  bool multi_precision_;

  /**
   * \brief gradient compression object.
   * starts with none, used after SetGradientCompression sets the type
   * currently there is no support for unsetting gradient compression
   */
  std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};

}  // namespace kvstore
}  // namespace mxnet

#endif  // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
omp_mxv_1d.c
/*
 export OMP_PLACES="{0:4:1},{4:4:1},{8:4:1},{12:4:1}"
 export OMP_PROC_BIND=close
 export OMP_NUM_THREADS=16
*/
# include <stdlib.h>
# include <stdio.h>
# include <time.h>
# include <omp.h>

/*
 * Parallel dense matrix-vector product c = A * b, where A is an N x N
 * matrix stored row-major in a flat array. Rows are split across OpenMP
 * threads. Usage: <program> <matrix size>.
 * FIX: declared int (the C standard requires int main), was `void main`.
 */
int main ( int argc, char *argv[] )
{
  double *a, *b, *c;
  int i, j, N;

  if (argc != 2)
  {
    printf ("Usage : %s <matrix size>\n", argv[0]);
    exit(1);
  }
  N = strtol(argv[1], NULL, 10);
  if (N <= 0)
  {
    fprintf (stderr, "matrix size must be a positive integer\n");
    exit(1);
  }
/*
  Allocate the matrix and the two vectors, checking for failure
  (the original dereferenced the pointers unconditionally).
*/
  a = ( double * ) malloc ( (size_t) N * N * sizeof ( double ) );
  b = ( double * ) malloc ( N * sizeof ( double ) );
  c = ( double * ) malloc ( N * sizeof ( double ) );
  if ( a == NULL || b == NULL || c == NULL )
  {
    fprintf (stderr, "allocation failed\n");
    exit(1);
  }
/*
  Fill A and b with uniform random values in [-1, 1].
  FIX: the original computed rand() / (RAND_MAX * 2.0 - 1.0), which lands in
  [0, 0.5] rather than the intended [-1, 1].
*/
  srand ( time ( NULL ) );
  for ( i = 0; i < N; i++ )
    for ( j = 0; j < N; j++ )
      a[i*N+j] = 2.0 * rand() / RAND_MAX - 1.0;

  for ( i = 0; i < N; i++ )
    b[i] = 2.0 * rand() / RAND_MAX - 1.0;
/*
  c[i] = sum_j A[i][j] * b[j], one row per loop iteration.
  FIX: the original wrapped the dot product in a redundant innermost k loop,
  which repeated every product N times — O(N^3) work instead of O(N^2), and
  every c[i] came out scaled by N.
*/
#pragma omp parallel for private (j) shared (a, b, c, N)
  for ( i = 0; i < N; i++)
  {
    double sum = 0.0;
    for ( j = 0; j < N; j++ )
      sum = sum + a[i*N+j] * b[j];
    c[i] = sum;
  }

  /* Optional debug dump:
  for ( i = 0; i < N; i++ )
  {
    for (j = 0; j < N; j++ )
      printf ("%1.3f ", a[i*N+j]);
    printf("\t %1.3f ", b[i]);
    printf("\t %1.3f \n", c[i]);
  }
  */

  free(a);
  free(b);
  free(c);
  return 0;
}
displacement_lagrangemultiplier_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /// Constructor. 
/** * @param DispRatioTolerance Relative tolerance for displacement error * @param DispAbsTolerance Absolute tolerance for displacement error * @param LMRatioTolerance Relative tolerance for lagrange multiplier error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false); // The displacement solution mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The contact solution mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "print_convergence_criterion" : false, "displacement_relative_tolerance" : 1.0e-4, "displacement_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement solution mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble(); 
mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble(); // The contact solution mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false); } // Copy constructor. DisplacementLagrangeMultiplierContactCriteria( DisplacementLagrangeMultiplierContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) { } /// Destructor. ~DisplacementLagrangeMultiplierContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something // Initialize TDataType disp_solution_norm = 0.0, lm_solution_norm = 0.0, disp_increase_norm = 0.0, lm_increase_norm = 0.0; IndexType disp_dof_num(0),lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType dof_value = 0.0, dof_incr = 0.0; // Loop over Dofs #pragma omp parallel for reduction(+:disp_solution_norm,lm_solution_norm,disp_increase_norm,lm_increase_norm,disp_dof_num,lm_dof_num,dof_id,dof_value,dof_incr) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; if (it_dof->IsFree()) { dof_id = it_dof->EquationId(); dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; const auto curr_var = it_dof->GetVariable(); if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { lm_solution_norm += dof_value * dof_value; lm_increase_norm += dof_incr * dof_incr; lm_dof_num++; } else { disp_solution_norm += dof_value * dof_value; disp_increase_norm += dof_incr * dof_incr; disp_dof_num++; } } } if(disp_increase_norm == 0.0) disp_increase_norm = 1.0; if(lm_increase_norm == 0.0) lm_increase_norm = 1.0; if(disp_solution_norm == 0.0) disp_solution_norm = 1.0; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm == 0.0) << 
"WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm); const TDataType lm_ratio = std::sqrt(lm_increase_norm/lm_solution_norm); const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num); const TDataType lm_abs = std::sqrt(lm_increase_norm)/ static_cast<TDataType>(lm_dof_num); // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance; } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT(" LAGRANGE MUL:\tRATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << 
r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << " LAGRANGE MUL:\tRATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } // We check if converged const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance); const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm == 0.0) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance); if (disp_converged && lm_converged) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } 
else { if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. 
ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, true); } } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const 
Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H */
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-2,3)),ceild(16*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(8*t1+Ny+13,24)),floord(16*t2+Ny+12,24)),floord(16*t1-16*t2+Nz+Ny+11,24));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(8*t1+Nx+13,64)),floord(16*t2+Nx+12,64)),floord(24*t3+Nx+20,64)),floord(16*t1-16*t2+Nz+Nx+11,64));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),24*t3+22),64*t4+62),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef 
LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
embedded_postprocess_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Ruben Zorrilla // // #ifndef KRATOS_EMBEDDED_POSTPROCESS_PROCESS_H #define KRATOS_EMBEDDED_POSTPROCESS_PROCESS_H // System includes #include <string> #include <iostream> // External includes // Project includes #include "processes/process.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" // Application includes namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Utility to filter the embedded velocity and pressure values class EmbeddedPostprocessProcess : public Process { public: ///@name Type Definitions ///@{ /// Pointer definition of EmbeddedPostprocessProcess KRATOS_CLASS_POINTER_DEFINITION(EmbeddedPostprocessProcess); ///@} ///@name Life Cycle ///@{ /// Constructor. EmbeddedPostprocessProcess(ModelPart& rModelPart) : mrModelPart(rModelPart) { } /// Destructor. 
~EmbeddedPostprocessProcess() override{} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ void ExecuteFinalizeSolutionStep() override { const array_1d<double, 3> aux_zero = ZeroVector(3); ModelPart::NodesContainerType& rNodes = mrModelPart.Nodes(); // Simple check if( mrModelPart.NodesBegin()->SolutionStepsDataHas( DISTANCE ) == false ) KRATOS_ERROR << "Nodes do not have DISTANCE variable!"; if( mrModelPart.NodesBegin()->SolutionStepsDataHas( PRESSURE ) == false ) KRATOS_ERROR << "Nodes do not have PRESSURE variable!"; if( mrModelPart.NodesBegin()->SolutionStepsDataHas( VELOCITY ) == false ) KRATOS_ERROR << "Nodes do not have VELOCITY variable!"; if( mrModelPart.NodesBegin()->SolutionStepsDataHas( EMBEDDED_WET_PRESSURE ) == false ) KRATOS_ERROR << "Nodes do not have EMBEDDED_WET_PRESSURE variable!"; if( mrModelPart.NodesBegin()->SolutionStepsDataHas( EMBEDDED_WET_VELOCITY ) == false ) KRATOS_ERROR << "Nodes do not have EMBEDDED_WET_VELOCITY variable!"; // Embedded postprocess variables set #pragma omp parallel for for (int k = 0; k < static_cast<int>(rNodes.size()); ++k) { ModelPart::NodesContainerType::iterator itNode = rNodes.begin() + k; const double dist = itNode->FastGetSolutionStepValue(DISTANCE); double& emb_wet_pres = itNode->FastGetSolutionStepValue(EMBEDDED_WET_PRESSURE); array_1d<double, 3>& emb_wet_vel = itNode->FastGetSolutionStepValue(EMBEDDED_WET_VELOCITY); if (dist >= 0.0) { emb_wet_pres = itNode->FastGetSolutionStepValue(PRESSURE); // Store the positive distance nodes PRESSURE in EMBEDDED_WET_PRESSURE variable emb_wet_vel = itNode->FastGetSolutionStepValue(VELOCITY); // Store the positive distance nodes VELOCITY in EMBEDDED_WET_VELOCITY variable } else { emb_wet_pres = 0.0; // The negative distance nodes EMBEDDED_WET_PRESSURE is set to zero for visualization purposes emb_wet_vel = aux_zero; // The negative distance nodes EMBEDDED_WET_VELOCITY is set to zero for visualization purposes } } } ///@} ///@name 
Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "EmbeddedPostprocessProcess" ; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override {rOStream << "EmbeddedPostprocessProcess";} /// Print object's data. void PrintData(std::ostream& rOStream) const override {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ModelPart& mrModelPart; ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Default constructor. EmbeddedPostprocessProcess() = delete; /// Assignment operator. EmbeddedPostprocessProcess& operator=(EmbeddedPostprocessProcess const& rOther) = delete; /// Copy constructor. EmbeddedPostprocessProcess(EmbeddedPostprocessProcess const& rOther) = delete; ///@} }; // Class EmbeddedPostprocessProcess ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} ///@} addtogroup block }; // namespace Kratos. #endif // KRATOS_EMBEDDED_POSTPROCESS_PROCESS_H
HashSet.h
/////////////////////////////////////////////////////////////////////////////// // SOFTWARE COPYRIGHT NOTICE AGREEMENT // // This software and its documentation are copyright (2010) by the // // Broad Institute. All rights are reserved. This software is supplied // // without any warranty or guaranteed support whatsoever. The Broad // // Institute is not responsible for its use, misuse, or functionality. // /////////////////////////////////////////////////////////////////////////////// /* * \file HashSet.h * \author tsharpe * \date Aug 20, 2010 * * \brief Big, low-memory-overhead hash table. */ #ifndef FEUDAL_HASHSET_H #define FEUDAL_HASHSET_H #include "feudal/BinaryStream.h" #include "feudal/Iterator.h" #include "math/PowerOf2.h" #include "system/Assert.h" #include "system/SpinLockedData.h" #include "system/WorklistN.h" #include <algorithm> #include <atomic> #include <cstddef> #include <cstring> #include <functional> #include <utility> /// Hopscotch hash set needs an abstract way of producing an array of values /// that can be used when T doesn't have a default constructor. /// This implementation is for the simple case where you can use new and delete. template <class T> class TFactory { public: template <class X> std::allocator<X> alloc(X*) const { return std::allocator<X>(); } T* create( size_t nTs ) const { return new T[nTs]; } void destroy( T* pTs, size_t nTs ) const { delete [] pTs; } }; // Ignore this little helper class. // It merely subclasses some likely-to-be-empty classes so they don't take up // extra space in the hash set classes. /// Template args as for HopscotchHashSet, which follows. 
/// Bundles the hash functor (H), comparison functor (C), and factory (F)
/// into one object.  Inheriting from each (instead of storing them as
/// members) lets the empty-base-class optimization make this bundle
/// zero-size when the functors are stateless, saving space in the hash set.
template <class T, class H, class C = std::equal_to<T>, class F=TFactory<T> >
class HCF : public H, public C, public F
{
public:
    /// Arguments to hash/compare are passed as const refs to H's argument type.
    typedef typename H::argument_type const& key_type;

    explicit HCF( H const& h= H(), C const& c = C(), F const& f = F() )
    : H(h), C(c), F(f) {}

    // compiler-supplied copying and destructor are OK

    /// Hash a key with the H functor.
    size_t hash( key_type val ) const
    { return static_cast<H const&>(*this)(val); }

    /// Compare two keys for equality with the C functor.
    bool compare( key_type v1, key_type v2 ) const
    { return static_cast<C const&>(*this)(v1,v2); }

    /// Swap all three functor sub-objects member-wise.
    friend void swap( HCF& hcf1, HCF& hcf2 )
    { using std::swap;
      swap(static_cast<H&>(hcf1),static_cast<H&>(hcf2));
      swap(static_cast<C&>(hcf1),static_cast<C&>(hcf2));
      swap(static_cast<F&>(hcf1),static_cast<F&>(hcf2)); }
};

/// One byte of per-bucket metadata for the hopscotch table:
/// a 2-bit occupancy status plus a 6-bit backward offset that chains
/// together buckets whose values hash to the same home bucket.
class BktStatus
{
public:
    /// EMPTY: bucket unused.  HEAD: holds a value hashed to this bucket.
    /// SQUATTER: holds a value displaced from some other home bucket.
    enum Status { EMPTY, HEAD, SQUATTER };

    // compiler-supplied default constructor, copying and destructor are OK

    Status getStatus() const { return static_cast<Status>(mStatus); }
    void setStatus( Status status ) { mStatus = status; }

    /// Distance (in buckets, backwards) to the next link of the hash chain.
    unsigned getOffset() const { return mOffset; }
    void setOffset( unsigned offset )
    { AssertLe(offset,maxOffset()); mOffset = offset; }

    /// A zero offset marks the end of the chain.
    bool hasNext() const { return mOffset; }
    /// Chain links point backwards in the bucket array (pointer arithmetic
    /// on `this` is valid because the metadata lives in one contiguous array).
    BktStatus const* next() const { return this-mOffset; }
    BktStatus* next() { return this-mOffset; }

    /// Largest offset representable in the 6-bit field (63).
    static unsigned maxOffset() { return (1ul << 6) - 1ul; }

private:
    // Packed into a single byte; layout matters: this type is serialized
    // directly (see TRIVIALLY_SERIALIZABLE below), so do not reorder.
    unsigned char mStatus : 2;
    unsigned char mOffset : 6;
};
TRIVIALLY_SERIALIZABLE(BktStatus);

// fwd decl for friend
template <class T, class H, class C, class F> class HashSet;

/// A fixed-size hash set.
/// The hopscotch algorithm provides high load factors without getting in a jam.
/// This implementation is optimized for low memory overhead:  just a single
/// byte per entry.
///
/// T, the value_type, must be assignable.  It must also be default
/// constructable, unless you supply a custom factory.
///
/// H is a hashing functor.
/// It transforms a T into a size_t, it's hash value:
///   size_t H( T const& ) const;
/// It must have a typedef called argument_type.
/// /// C is a comparison functor. /// It returns true if two T's are equal: /// bool C( T const&, T const& ) const; /// /// F is a factory for an array of Ts. /// /// This class is NOT thread-safe. However, it has a lock and unlock method, /// so you can single-thread it, if you want. That's what HashSet does. template <class T, class H, class C = std::equal_to<T>, class F=TFactory<T> > class HopscotchHashSet : public SpinLockedData { public: typedef T value_type; typedef value_type const* const_pointer; typedef value_type const& const_reference; typedef unsigned size_type; typedef std::ptrdiff_t difference_type; typedef H hash_func_type; typedef C comp_func_type; typedef F factory_type; typedef typename H::argument_type const& key_type; friend class HashSet<T,H,C,F>; class NoRoomException : public std::exception { public: NoRoomException( char const* msg ) : mMsg(msg) {} char const* what() const throw() { return mMsg; } private: char const* mMsg; }; // this is a const_iterator over the value_type entries class const_iterator : public std::iterator<std::bidirectional_iterator_tag,value_type const>, public IteratorBiDiBase<const_iterator,const_pointer,difference_type> { typedef IteratorBiDiBase<const_iterator,const_pointer,difference_type> Base; public: const_iterator() : Base(0), mpHHS(0) {} const_iterator( const_pointer pCur, HopscotchHashSet const* pHS ) : Base(pCur-1), mpHHS(pHS) { fwd(1); } // compiler-supplied copying and destructor are OK const_reference operator*() const { return *Base::pos(); } const_pointer operator->() const { return Base::pos(); } void fwd( difference_type diff ) { if ( diff < 0 ) bwd(-diff); else while ( diff-- ) do { Base::fwd(1); } while ( mpHHS->isEmptyPosition(Base::pos()) ); } void bwd( difference_type diff ) { if ( diff < 0 ) fwd(-diff); else while ( diff-- ) do { Base::bwd(1); } while ( mpHHS->isEmptyPosition(Base::pos()) ); } private: HopscotchHashSet const* mpHHS; }; explicit HopscotchHashSet( size_type cap, HCF<T,H,C,F> 
const& hcf = HCF<T,H,C,F>() ) : mHCF(hcf), mCapacity(adjustCapacity(cap)), mSize(0), mBuckets(hcf.create(capacity())), mBktInfo(hcf.alloc(static_cast<BktStatus*>(nullptr)).allocate(capacity())) { memset(mBktInfo,0,capacity()); } HopscotchHashSet( HopscotchHashSet const& that ) : mHCF(that.mHCF), mCapacity(that.capacity()), mSize(0), mBuckets(that.mHCF.create(capacity())), mBktInfo(that.mHCF.alloc(static_cast<BktStatus*>(nullptr)).allocate(capacity())) { memset(mBktInfo,0,capacity()); *this = that; } ~HopscotchHashSet() { mHCF.destroy(mBuckets,capacity()); mHCF.alloc(static_cast<BktStatus*>(nullptr)).deallocate(mBktInfo,mCapacity); } HopscotchHashSet& operator=( HopscotchHashSet const& that ) { if ( this != &that ) { clear(); BktStatus const* start = that.mBktInfo; BktStatus const* end = start + that.capacity(); for ( BktStatus const* itr(start); itr != end; ++itr ) if ( itr->getStatus() == BktStatus::HEAD ) add(that.mBuckets[itr-start]); for ( BktStatus const* itr(start); itr != end; ++itr ) if ( itr->getStatus() == BktStatus::SQUATTER ) add(that.mBuckets[itr-start]); } return *this; } size_type capacity() const { return mCapacity; } size_type size() const { return mSize; } const_iterator begin() const { return const_iterator(mBuckets,this); } const_iterator end() const { return const_iterator(mBuckets+capacity(),this); } /// Returns true if the value is in the set. bool contains( key_type val ) const { return lookup(val); } /// Returns pointer to val, or null pointer if val is not in set. const_pointer lookup( key_type val ) const { return lookup(val,mHCF.hash(val)); } /// Returns true if the value was added (otherwise, it was already present). bool add( key_type val ) throw(NoRoomException) { return add(val,mHCF.hash(val)); } /// Returns a reference to the value. Inserts value, if not present. 
const_reference operator[]( key_type val ) throw(NoRoomException) { size_type idx = mHCF.hash(val) % capacity(); const_pointer pBkt = find(val,idx); if ( pBkt ) return *pBkt; // EARLY RETURN! return (findInsertSlot(val,idx) = val); } /// Removes the value from the set. Returns false if value not present. bool remove( key_type val ) { return remove(val,mHCF.hash(val)); } /// Removes all entries for which the functor is true by copying all the /// retained values, and swapping. template <class Functor> void remove_if( Functor func ) { HopscotchHashSet that(mCapacity,mHCF); for ( size_t idx = 0; idx != mCapacity; ++idx ) { BktStatus* pInfo = mBktInfo+idx; if ( pInfo->getStatus() == BktStatus::HEAD ) { const_pointer pBucket = mBuckets+idx; if ( !func(*pBucket) ) that.findInsertSlot(idx) = *pBucket; while ( pInfo->hasNext() ) { pInfo = next(pInfo); pBucket = mBuckets+(pInfo-mBktInfo); if ( !func(*pBucket) ) that.findInsertSlot(idx) = *pBucket; } } } swap(*this,that); } /// Removes all values from the set. HopscotchHashSet& clear() { memset(mBktInfo,0,capacity()); mSize = 0; return *this; } bool isEmptyPosition( const_pointer pBkt ) const { size_type idx = pBkt - mBuckets; return idx < capacity() && mBktInfo[idx].getStatus()==BktStatus::EMPTY; } void writeBinary( BinaryWriter& writer ) const { writer.write(mSize); writer.write(mBktInfo,mBktInfo+mCapacity); writer.write(mBuckets,mBuckets+mCapacity); } void readBinary( BinaryReader& reader ) { reader.read(&mSize); reader.read(mBktInfo,mBktInfo+mCapacity); reader.read(mBuckets,mBuckets+mCapacity); } static size_t externalSizeof() { return 0; } /// Swaps set contents. 
friend void swap( HopscotchHashSet& set1, HopscotchHashSet& set2 ) { using std::swap; swap(set1.mCapacity,set2.mCapacity); swap(set1.mSize,set2.mSize); swap(set1.mBuckets,set2.mBuckets); swap(set1.mBktInfo,set2.mBktInfo); swap(set1.mHCF,set2.mHCF); } // note: base class spinlock is not swapped, and that's on purpose protected: // these protected methods are used by the friendly HashSet, which follows const_pointer lookup( key_type val, size_t hash ) const { return find(val,hash%capacity()); } template <class Comp> const_pointer lookup( key_type val, size_t hash, Comp const& comp ) const { // Check home slot. const_pointer pBkt = 0; BktStatus const* pInfo = mBktInfo + hash%capacity(); if ( pInfo->getStatus() == BktStatus::HEAD ) { pBkt = &mBuckets[pInfo - mBktInfo]; if ( !comp(val,*pBkt) ) { pBkt = 0; // Check the rest of the chain, if any. while ( pInfo->hasNext() ) { pInfo = next(pInfo); const_reference bkt = mBuckets[pInfo - mBktInfo]; if ( comp(val,bkt) ) {pBkt = &bkt; break; } } } } return pBkt; } bool add( key_type val, size_t hash ) throw(NoRoomException) { size_type idx = hash%capacity(); if ( find(val,idx) ) return false; findInsertSlot(idx) = val; return true; } bool remove( key_type val, size_t hash ) { // Find the head slot, and make sure it's actually a chain head. (If it's // empty or an overflow slot, there are no keys with the right hash.) BktStatus* pInfo = mBktInfo + hash%capacity(); if ( pInfo->getStatus() != BktStatus::HEAD ) return false;// EARLY RETURN! BktStatus* pInfoPrev = 0; value_type* pBkt; // Find the right slot, where the value compares equal to the one sought. while ( true ) { pBkt = &mBuckets[pInfo - mBktInfo]; if ( mHCF.compare(val,*pBkt) ) break; if ( !pInfo->hasNext() ) return false; // EARLY RETURN! 
pInfoPrev = pInfo; pInfo = next(pInfo); } // At this point, pInfo and pBkt point to the right places for the value // to be deleted, and pInfoPrev is either the previous entry in the chain, // or null (when the value was found at the chain head). if ( pInfo->hasNext() ) copyAndKillEndOfChain(pInfo,pBkt); else // else at end of chain { if ( pInfoPrev ) pInfoPrev->setOffset(0); pInfo->setStatus(BktStatus::EMPTY); } mSize -= 1; return true; } const_reference insertK( key_type val, size_t hash ) throw(NoRoomException) { return (findInsertSlot(hash%capacity()) = val); } void insertV( value_type const& val, size_t hash ) throw(NoRoomException) { findInsertSlot(hash%capacity()) = val; } void insertV( value_type&& val, size_t hash ) throw(NoRoomException) { findInsertSlot(hash%capacity()) = std::move(val); } private: BktStatus* next( BktStatus* pInfo ) { pInfo = pInfo->next(); if ( pInfo < mBktInfo ) pInfo += capacity(); return pInfo; } BktStatus const* next( BktStatus const* pInfo ) const { pInfo = pInfo->next(); if ( pInfo < mBktInfo ) pInfo += capacity(); return pInfo; } size_type distance( BktStatus const* pInfo, BktStatus const* pInfo2 ) const { return pInfo2 < pInfo ? pInfo-pInfo2 : pInfo-pInfo2+capacity(); } // Walk chain to find value. const_pointer find( key_type val, size_type offset ) const { // Check home slot. const_pointer pBkt = 0; BktStatus const* pInfo = mBktInfo + offset; if ( pInfo->getStatus() == BktStatus::HEAD ) { pBkt = &mBuckets[pInfo - mBktInfo]; if ( !mHCF.compare(val,*pBkt) ) { pBkt = 0; // Check the rest of the chain, if any. 
while ( pInfo->hasNext() ) { pInfo = next(pInfo); const_reference bkt = mBuckets[pInfo - mBktInfo]; if ( mHCF.compare(val,bkt) ) {pBkt = &bkt; break; } } } } return pBkt; } void copyAndKillEndOfChain( BktStatus* pInfo, value_type* pBkt ) { Assert(pInfo->hasNext()); BktStatus* pInfoPrev; do { pInfoPrev = pInfo; pInfo = next(pInfo); } while ( pInfo->hasNext() ); *pBkt = std::move(mBuckets[pInfo-mBktInfo]); pInfoPrev->setOffset(0); pInfo->setStatus(BktStatus::EMPTY); } // Find a place for a value known not to be present. Idx is the chain head. value_type& findInsertSlot( size_type idx ) throw(NoRoomException) { if ( size() == capacity() ) throw NoRoomException("capacity exceeded"); BktStatus* pInfo = mBktInfo + idx; switch ( pInfo->getStatus() ) {case BktStatus::SQUATTER: // move squatter, then use now-empty slot bump(pInfo); // no break statement -- allow flow-through to next case case BktStatus::EMPTY: // just use the empty slot pInfo->setStatus(BktStatus::HEAD); break; case BktStatus::HEAD: // extend chain { while ( pInfo->hasNext() ) pInfo = next(pInfo); BktStatus* pInfo2 = findEmpty(pInfo); pInfo2->setStatus(BktStatus::SQUATTER); pInfo->setOffset(distance(pInfo,pInfo2)); pInfo = pInfo2; break; } } mSize += 1; return mBuckets[pInfo-mBktInfo]; } // move a SQUATTER key elsewhere void bump( BktStatus* pBump ) throw(NoRoomException) { AssertEq((int)pBump->getStatus(),(int)BktStatus::SQUATTER); // search for the entry that points to pBump BktStatus* pPrev = pBump; size_type off = 0; do { if ( ++pPrev >= mBktInfo+capacity() ) pPrev -= capacity(); } while ( ++off != pPrev->getOffset() ); // loop for weird edge case where pEmpty gets picked too far from // its successor-to-be BktStatus* pEmpty; size_type distToEmpty; size_type distToSucc; while ( true ) // loop for weird edge case { pEmpty = findEmpty(pPrev); distToEmpty = distance(pPrev,pEmpty); distToSucc = pPrev->getOffset() + pBump->getOffset(); if ( !pBump->hasNext() || distToEmpty > pPrev->getOffset() || 
distToSucc-distToEmpty <= BktStatus::maxOffset() ) break; // handle difficult case where pEmpty is too far from its successor copyAndKillEndOfChain(pBump,&mBuckets[pEmpty-mBktInfo]); pEmpty->setStatus(BktStatus::SQUATTER); // link pEmpty into chain between pPrev and pBump pEmpty->setOffset(pPrev->getOffset()-distToEmpty); pPrev->setOffset(distToEmpty); pPrev = pEmpty; } // move pBump's value to pEmpty move(pPrev,pBump,pEmpty,distToEmpty,distToSucc); } void move( BktStatus* pPrev, BktStatus* pBump, BktStatus* pEmpty, size_type distToEmpty, size_type distToSucc ) { // move pBump's value to pEmpty mBuckets[pEmpty-mBktInfo] = std::move(mBuckets[pBump-mBktInfo]); pEmpty->setStatus(BktStatus::SQUATTER); // fix up chain, as necessary if ( pBump->hasNext() ) { if ( distToSucc > distToEmpty ) pEmpty->setOffset(distToSucc-distToEmpty); else { pPrev->setOffset(distToSucc); // snip pBump out of chain // walk down chain to the end, or until pEmpty is between pPrev // and its successor do { distToEmpty -= pPrev->getOffset(); pPrev = next(pPrev); } while ( pPrev->hasNext() && pPrev->getOffset() < distToEmpty ); if ( pPrev->hasNext() ) pEmpty->setOffset(pPrev->getOffset()-distToEmpty); } } pPrev->setOffset(distToEmpty); // pBump is now free pBump->setStatus(BktStatus::EMPTY); pBump->setOffset(0); } // find empty slot within range of pInfo, or create one by hopscotchingSet BktStatus* findEmpty( BktStatus* pInfo ) throw(NoRoomException) { AssertLt(mSize,capacity()); BktStatus* pEmpty = pInfo; do { if ( --pEmpty < mBktInfo ) pEmpty += capacity(); } while ( pEmpty->getStatus() != BktStatus::EMPTY ); while ( distance(pInfo,pEmpty) > BktStatus::maxOffset() ) pEmpty = hopScotch(pEmpty); return pEmpty; } BktStatus* hopScotch( BktStatus* pEmpty ) throw (NoRoomException) { AssertEq((int)pEmpty->getStatus(),(int)BktStatus::EMPTY); BktStatus* pPrev = pEmpty + BktStatus::maxOffset(); if ( pPrev >= mBktInfo + capacity() ) pPrev -= capacity(); size_type distToEmpty = BktStatus::maxOffset(); // 
for each entry that's in range of the free slot while ( pPrev != pEmpty ) { // if pPrev's successor follows pEmpty (a move in the right direction) if ( pPrev->hasNext() && pPrev->getOffset() < distToEmpty ) { BktStatus* pBump = next(pPrev); size_type distToSucc = pPrev->getOffset()+pBump->getOffset(); // if the chain can be made whole after swapping pBump and pEmpty if ( !pBump->hasNext() || distToEmpty > pPrev->getOffset() || distToSucc-distToEmpty <= BktStatus::maxOffset() ) { move(pPrev,pBump,pEmpty,distToEmpty,distToSucc); return pBump; } } // EARLY RETURN! distToEmpty -= 1; if ( --pPrev < mBktInfo ) pPrev += capacity(); } throw NoRoomException("hopscotching failed"); } static size_type adjustCapacity( size_type capacity ) { if ( capacity < 2*BktStatus::maxOffset()+1 ) capacity = 2*BktStatus::maxOffset()+1; capacity |= 1; while ( !capacity%3 || !capacity%5 || !capacity%7 || !capacity%11 ) capacity += 2; return capacity; } HCF<T,H,C,F> mHCF; size_type mCapacity; size_type mSize; value_type* mBuckets; BktStatus* mBktInfo; }; template <class T, class H, class C, class F> struct Serializability< HopscotchHashSet<T,H,C,F> > { typedef SelfSerializable type; }; /// A gracefully-growable HashSet. /// Splits fixed-size hopscotch hash sets as it grows. This means that only a /// fraction of the keys might need to be reorganized for any given call to /// add() or operator[], which is smoother than reorganizing the whole /// enchilada, as is typical in size-doubling schemes. It also means that /// memory use is in a few, large chunks, rather than requiring one giant /// contiguous space. /// Template args as for HopscotchHashSet. /// /// Thread-safe for multiple writers. Thread-safe for multiple readers. /// NOT thread-safe for a mix of readers and writers. 
template <class T, class H=std::hash<T>, class C=std::equal_to<T>, class F=TFactory<T> > class HashSet : private SpinLockedData { public: typedef T value_type; typedef value_type const* const_pointer; typedef value_type const& const_reference; typedef size_t size_type; typedef std::ptrdiff_t difference_type; typedef H hash_func_type; typedef C comp_func_type; typedef F factory_type; typedef typename H::argument_type const& key_type; typedef HopscotchHashSet<T,H,C,F> HHS; typedef HHS*volatile* PPHHS; typedef typename HHS::NoRoomException NoRoomException; typedef typename HHS::const_iterator ICItr; // This is an iterator over the HopscotchHashSets. // Sorry to expose this bit of internal organization, but it seems simpler. // To iterate over entries, you'll need a nested for loop. class iterator : public std::iterator<std::bidirectional_iterator_tag,HHS>, public IteratorBiDiBase<iterator,PPHHS,difference_type> { typedef IteratorBiDiBase<iterator,PPHHS,difference_type> Base; public: iterator() : Base(0), mpHS(0) {} iterator( PPHHS pCur, HashSet* pHS ) : Base(pCur-1), mpHS(pHS) { fwd(1); } // compiler-supplied copying and destructor are OK HHS& operator*() const { return **Base::pos(); } HHS* operator->() const { return *Base::pos(); } void fwd( difference_type diff ) { if ( diff < 0 ) bwd(-diff); else while ( diff-- ) do { Base::fwd(1); } while ( mpHS->isEmptyPosition(Base::pos()) ); } void bwd( difference_type diff ) { if ( diff < 0 ) fwd(-diff); else while ( diff-- ) do { Base::bwd(1); } while ( mpHS->isEmptyPosition(Base::pos()) ); } private: HashSet* mpHS; }; // This is a const_iterator over the (const) HopscotchHashSets. // Sorry to expose this bit of internal organization, but it seems simpler. // To iterate over entries, you'll need a nested for loop. 
class const_iterator : public std::iterator<std::bidirectional_iterator_tag,HHS const>, public IteratorBiDiBase<const_iterator,PPHHS,difference_type> { typedef IteratorBiDiBase<const_iterator,PPHHS,difference_type> Base; public: const_iterator() : Base(0), mpHS(0) {} const_iterator( PPHHS pCur, HashSet const* pHS ) : Base(pCur-1), mpHS(pHS) { fwd(1); } // compiler-supplied copying and destructor are OK HHS const& operator*() const { return **Base::pos(); } HHS const* operator->() const { return *Base::pos(); } void fwd( difference_type diff ) { if ( diff < 0 ) bwd(-diff); else while ( diff-- ) do { Base::fwd(1); } while ( mpHS->isEmptyPosition(Base::pos()) ); } void bwd( difference_type diff ) { if ( diff < 0 ) fwd(-diff); else while ( diff-- ) do { Base::bwd(1); } while ( mpHS->isEmptyPosition(Base::pos()) ); } private: HashSet const* mpHS; }; explicit HashSet( size_t cap, hash_func_type const& hasher = hash_func_type(), comp_func_type const& comper = comp_func_type(), factory_type const& factory = factory_type(), double maxLoadFactor=.75 ) : mHCF(hasher,comper,factory) { init(cap,maxLoadFactor); } HashSet( HashSet const& that ) : mHCF(that.mHCF) { init(that.size()); const_iterator end(that.end()); for ( const_iterator itr(that.begin()); itr != end; ++itr ) add(itr->begin(),itr->end()); } ~HashSet() { destroy(); } HashSet& operator=( HashSet const& that ) { if ( this != &that ) { clear(); const_iterator end(that.end()); for ( const_iterator itr(that.begin()); itr != end; ++itr ) add(itr->begin(),itr->end()); } return *this; } size_type size() const { size_t totSize = 0; PPHHS end(mppHHS+mCapacity); for ( PPHHS itr(mppHHS); itr != end; ++itr ) if ( *itr ) totSize += (*itr)->size(); return totSize; } factory_type const& getFactory() const { return mHCF; } hash_func_type const& getHasher() const { return mHCF; } comp_func_type const& getComparator() const { return mHCF; } const_iterator begin() const { return const_iterator(mppHHS,this); } const_iterator end() const 
{ return const_iterator(mppHHS+mCapacity,this); } const_iterator cbegin() { return const_iterator(mppHHS,this); } const_iterator cend() { return const_iterator(mppHHS+mCapacity,this); } iterator begin() { return iterator(mppHHS,this); } iterator end() { return iterator(mppHHS+mCapacity,this); } /// Returns true if the value is in the set. /// Not safe to call when there are writers lurking about. Fine in a /// readers-only context. Sorry about the ridiculous rules for using the /// API for this class. bool contains( key_type val ) const { return lookup(val); } /// Returns pointer to val, or null pointer if val is not in set. /// Not safe to call when there are writers lurking about. Fine in a /// readers-only context. Sorry about the ridiculous rules for using the /// API for this class. const_pointer lookup( key_type val ) const { size_t hash = mHCF.hash(val); return findHHSC(hash)->lookup(val,hash); } template <class Comp> const_pointer lookup( size_t hash, key_type val, Comp const& comp ) const { return findHHSC(hash)->lookup(val,hash,comp); } /// Returns true if the value was added (otherwise, it was already present). bool add( key_type val ) { size_t hash = mHCF.hash(val); HHS* pHHS = findHHS(hash); if ( pHHS->lookup(val,hash) ) { pHHS->unlock(); return false; } insert(val,hash,pHHS); return true; } template <class Itr> void add( Itr itr, Itr end ) { while ( itr != end ) add(*itr); } const_pointer add( size_t hash, key_type val ) { HHS* pHHS = findHHS(hash); const_pointer ppp = pHHS->lookup(val,hash); if ( ppp ) { pHHS->unlock(); return ppp; } // EARLY RETURN! return &insert(val,hash,pHHS); } /// Applies functor to entry. Inserts value, if not present. Threadsafe. template <class Func> void apply( key_type val, Func func ) { apply(mHCF.hash(val),val,func); } /// Applies functor to entry. Inserts value, if not present. Threadsafe. 
template <class Func> void apply( size_t hash, key_type val, Func func ) { HHS* pHHS = findHHS(hash); const_pointer pEnt = pHHS->lookup(val,hash); if ( !pEnt ) pEnt = &insertLocked(val,hash,pHHS); func(*pEnt); pHHS->unlock(); } /// Returns a reference to the value. Inserts value, if not present. const_reference operator[]( key_type val ) { size_t hash = mHCF.hash(val); HHS* pHHS = findHHS(hash); const_pointer ppp = pHHS->lookup(val,hash); if ( ppp ) { pHHS->unlock(); return *ppp; } // EARLY RETURN! return insert(val,hash,pHHS); } /// Specialized inserter: Inserts value_type rather than key, /// does not check for uniqueness. This is dangerous, and would typically /// be used only when copying values known to comprise a set into a /// hashtable -- perhaps in a reduce function for the MapReduceEngine. void insertUniqueValue( value_type const& val ) { size_t hash = mHCF.hash(val); HHS* pHHS = findHHS(hash); while ( true ) { try { pHHS->insertV(val,hash); pHHS->unlock(); break; } catch ( NoRoomException const& ) { pHHS = split(hash); } } } /// Specialized inserter: No locking, inserts value_type rather than key, /// does not check for uniqueness. This is dangerous, and would typically /// be used only when copying values known to comprise a set into a /// hashtable from a single thread -- perhaps when you're reading a cached /// binary file. void insertUniqueValueNoLocking( value_type const& val ) { size_t hash = mHCF.hash(val); HHS* pHHS = findHHSC(hash); while ( true ) { try { pHHS->insertV(val,hash); break; } catch ( NoRoomException const& ) { pHHS = split(hash); } } } /// Removes the value from the set. Returns false if value not present. bool remove( key_type val ) { size_t hash = mHCF.hash(val); HHS* pHHS = findHHS(hash); bool result = pHHS->remove(val,hash); pHHS->unlock(); return result; } /// Removes the value from the set. Returns false if value not present. /// Not thread-safe for obvious reasons. 
bool removeNoLocking( key_type val ) { size_t hash = mHCF.hash(val); HHS* pHHS = findHHSC(hash); return pHHS->remove(val,hash); } /// Removes all entries for which the functor returns true. /// This effectively rebuilds the entire table in place, and isn't a good /// idea if you're removing a small fraction of the entries. (Iterate, /// and call remove instead.) Not thread safe. template <class Functor> void remove_if( Functor const& func ) { for ( PPHHS itr(mppHHS), end(mppHHS+mCapacity); itr != end; ++itr ) if ( *itr ) (*itr)->remove_if(func); } /// Delete all values. NB: No locking! You can't call clear when there /// are simultaneous writers. HashSet& clear() { PPHHS end = mppHHS + mCapacity; for ( PPHHS itr(mppHHS); itr != end; ++itr ) if ( *itr ) (*itr)->clear(); return *this; } bool isEmptyPosition( PPHHS ppHHS ) const { size_type idx = ppHHS - mppHHS; return idx < mCapacity && !*ppHHS; } void writeBinary( BinaryWriter& writer ) const { size_type cap = mCapacity; writer.write(cap); writer.write(mInnerCapacity); PPHHS end = mppHHS + mCapacity; for ( PPHHS itr = mppHHS; itr != end; ++itr ) { if ( !*itr ) writer.write(false); else { writer.write(true); writer.write(**itr); } } } void readBinary( BinaryReader& reader ) { destroy(); size_type cap; reader.read(&cap); ForceAssert(cap && !(cap & (cap-1))); mCapacity = cap; reader.read(&mInnerCapacity); mppHHS = allocArray(mCapacity); memset((void*)mppHHS.load(),0,mCapacity*sizeof(HHS*)); PPHHS end = mppHHS + mCapacity; for ( PPHHS itr = mppHHS; itr != end; ++itr ) { bool populated; reader.read(&populated); if ( populated ) { *itr = createHHS(); reader.read(*itr); } } } static size_t externalSizeof() { return 0; } friend bool operator==( HashSet const& set1, HashSet const& set2 ) { bool result = set1.size() == set2.size(); for ( const_iterator oItr(set1.begin()), oEnd(set1.end()); result && oItr != oEnd; ++oItr ) for ( ICItr iItr(oItr->begin()), iEnd(oItr->end()); result && iItr != iEnd; ++iItr ) { const_pointer pEnt 
= set2.lookup(*iItr); result = pEnt && *pEnt == *iItr; } return result; } friend bool operator!=( HashSet const& set1, HashSet const& set2 ) { return !(set1 == set2); } /// returns number of misassignments size_t validateBinAssignments() const { size_t result = 0; for ( PPHHS oitr=mppHHS,oend=mppHHS+mCapacity; oitr != oend; ++oitr ) { if ( !*oitr ) continue; for ( ICItr itr=oitr[0]->begin(),end=oitr[0]->end(); itr != end; ++itr ) { if ( findHHSC(mHCF.hash(*itr)) != *oitr ) result += 1; } } return result; } template <class Proc> void parallelForEachHHS( Proc proc, size_t nThreads = getConfiguredNumThreads() ) const { PPHHS ppHHS = mppHHS; #pragma omp parallel { Proc p(proc); #pragma omp for for (size_type idx = 0; idx < mCapacity; idx++) { HHS const *pHHS = ppHHS[idx]; if (pHHS) p(*pHHS); } } } private: HHS* createHHS() { HHS* pHHS = mHCF.alloc(static_cast<HHS*>(nullptr)).allocate(1); new (pHHS) HHS(mInnerCapacity,mHCF); return pHHS; } PPHHS allocArray( size_t nnn ) { return (PPHHS)mHCF.alloc(static_cast<HHS**>(nullptr)).allocate(nnn); } void deallocArray( PPHHS ppHHS, size_t nnn ) { mHCF.alloc(static_cast<HHS**>(nullptr)).deallocate((HHS**)ppHHS,nnn); } void init( size_t cap, double maxLoadFactor ) { int capCeilLg2 = PowerOf2::ceilLg2(cap/maxLoadFactor); int innerCapLg2 = std::max(10,(capCeilLg2+1)/2); int outerCapLg2 = std::max(7,capCeilLg2-innerCapLg2); mCapacity = 1ul << outerCapLg2; mppHHS = allocArray(mCapacity); PPHHS ppp = mppHHS + mCapacity; mInnerCapacity = PowerOf2::getNearbyPrime(innerCapLg2); size_t allocCap = mCapacity*mInnerCapacity; //if(cap>1000000) //std::cout << "HashSet requested size=" << cap << " allocated size=" // << allocCap << " load factor=" << 1.*cap/allocCap << std::endl; while ( ppp != mppHHS ) *--ppp = createHHS(); mSplitCount = 0; } // no locking: therefore only useful when there are not multiple writers // that might cause table reorganizations. 
HHS* findHHSC( size_t hash ) const { size_t mask = mCapacity - 1; hash ^= hash >> 32; HHS* result; PPHHS ppHHS = mppHHS; while ( !(result = ppHHS[hash&mask]) ) mask >>= 1; return result; } // return with HHS locked HHS* findHHS( size_t hash ) { hash ^= hash >> 32; HHS* pHHS; while ( true ) { unsigned splitCount = mSplitCount; size_t mask = mCapacity - 1; PPHHS ppHHS = mppHHS; while ( !(pHHS = ppHHS[hash & mask]) ) mask >>= 1; pHHS->lock(); if ( splitCount == mSplitCount ) break; pHHS->unlock(); } return pHHS; } const_reference insert( key_type val, size_t hash, HHS* pHHS ) { while ( true ) { try { const_reference result = pHHS->insertK(val,hash); pHHS->unlock(); return result; } catch ( NoRoomException const& ) { pHHS = split(hash); } } } const_reference insertLocked( key_type val, size_t hash, HHS*& pHHS ) { while ( true ) { try { return pHHS->insertK(val,hash); } catch ( NoRoomException const& ) { pHHS = split(hash); } } } HHS* split( size_t hash ) { HHS* pHi = createHHS(); pHi->lock(); lock(); size_t capacity = mCapacity; size_t mask = capacity - 1; hash ^= hash >> 32; while ( !mppHHS[hash&mask] ) mask >>= 1; size_t idxLo = hash&mask++; while ( !mppHHS[idxLo|(mask>>1)] ) mask >>= 1; size_t idxHi = idxLo | mask; if ( idxHi >= capacity ) { PPHHS ppHHS = allocArray(capacity<<1); memcpy((void*)ppHHS,(void*)mppHHS.load(),capacity*sizeof(HHS*)); memset((void*)(ppHHS+capacity),0,capacity*sizeof(HHS*)); mOldppHHSs.push_front((PPHHS)mppHHS); mppHHS = ppHHS; mCapacity = 2*capacity; } mppHHS[idxHi] = pHi; ++mSplitCount; unlock(); HHS* pLo = mppHHS[idxLo]; HHS tmp(mInnerCapacity,mHCF); for ( ICItr itr(pLo->begin()), end(pLo->end()); itr != end; ++itr ) { value_type& val = const_cast<value_type&>(*itr); size_t hash2 = mHCF.hash(val); (((hash2^(hash2>>32))&mask)?pHi:&tmp)->insertV(std::move(val),hash2); } if ( hash&mask ) { swap(*pLo,tmp); pLo->unlock(); pLo = pHi; } else { pHi->unlock(); swap(*pLo,tmp); } return pLo; } void destroy() { PPHHS end = mppHHS+mCapacity; size_t 
nHHS = 0; for ( PPHHS itr(mppHHS); itr != end; ++itr ) { HHS* pHHS = *itr; if ( pHHS ) { pHHS->~HHS(); mHCF.alloc(pHHS).deallocate(pHHS,1); nHHS += 1; } } deallocArray(mppHHS,mCapacity); size_type capacity = mCapacity; for ( auto itr=mOldppHHSs.begin(), end=mOldppHHSs.end(); itr!=end; ++itr ) deallocArray(*itr,capacity/=2); mOldppHHSs.clear(); if ( 1.*nHHS/(nHHS-mSplitCount) > 1.3 ) std::cout << "Warning: HashSet initial size too small." << std::endl; } std::atomic<PPHHS> mppHHS; // length == mCapacity std::atomic<size_type> mCapacity; // number of HHS's, not max values: // always a power of 2 std::atomic<unsigned> mSplitCount;// counter of the number of splits unsigned mInnerCapacity; // prime number just < than a power of 2 std::list<PPHHS> mOldppHHSs; HCF<T,H,C,F> mHCF; }; template <class T, class H, class C, class F> struct Serializability< HashSet<T,H,C,F> > { typedef SelfSerializable type; }; #endif /* FEUDAL_HASHSET_H */
// ===== file boundary: GB_unop__identity_uint64_int64.c =====
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_int64)
// op(A') function:  GB (_unop_tran__identity_uint64_int64)

// C type:   uint64_t
// A type:   int64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    int64_t aij = Ax [pA] ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    uint64_t z = (uint64_t) aij ;           \
    Cx [pC] = z ;                           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint64_int64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint64_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
// ===== file boundary: ast-dump-openmp-for.c =====
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp for for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp for for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp for collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl 0x{{.*}} <{{.*}}ast-dump-openmp-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt 0x{{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPForDirective 0x{{.*}} <line:4:9, col:16> // CHECK-NEXT: | `-CapturedStmt 0x{{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt 0x{{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | | 
|-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt 0x{{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl 0x{{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:4:9) *const restrict' // CHECK-NEXT: | | `-VarDecl 0x{{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr 0x{{.*}} <col:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl 0x{{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt 0x{{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPForDirective 0x{{.*}} <line:10:9, col:16> // CHECK-NEXT: | `-CapturedStmt 0x{{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt 0x{{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} 
<col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 0x{{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:21> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:28> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt 0x{{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl 0x{{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:10:9) *const restrict' // CHECK-NEXT: | | |-VarDecl 0x{{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl 0x{{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr 0x{{.*}} <line:11:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr 0x{{.*}} <line:12:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl 0x{{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt 0x{{.*}} <col:31, line:21:1> // 
CHECK-NEXT: | `-OMPForDirective 0x{{.*}} <line:17:9, col:28> // CHECK-NEXT: | |-OMPCollapseClause 0x{{.*}} <col:17, col:27> // CHECK-NEXT: | | `-ConstantExpr 0x{{.*}} <col:26> 'int' // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:26> 'int' 1 // CHECK-NEXT: | `-CapturedStmt 0x{{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt 0x{{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 0x{{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:21> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 
0x{{.*}} <col:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:28> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt 0x{{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl 0x{{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:17:9) *const restrict' // CHECK-NEXT: | | |-VarDecl 0x{{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl 0x{{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr 0x{{.*}} <line:18:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr 0x{{.*}} <line:19:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl 0x{{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt 0x{{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPForDirective 0x{{.*}} <line:24:9, col:28> // CHECK-NEXT: | |-OMPCollapseClause 0x{{.*}} <col:17, col:27> // CHECK-NEXT: | | `-ConstantExpr 0x{{.*}} <col:26> 'int' // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:26> 'int' 2 // CHECK-NEXT: | `-CapturedStmt 0x{{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt 0x{{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' 
'<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 0x{{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:21> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:28> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt 0x{{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl 0x{{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:24:9) *const restrict' // CHECK-NEXT: | | |-VarDecl 0x{{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl 0x{{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | 
|-DeclRefExpr 0x{{.*}} <line:25:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr 0x{{.*}} <line:26:5> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl 0x{{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl 0x{{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl 0x{{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl 0x{{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt 0x{{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPForDirective 0x{{.*}} <line:31:9, col:28> // CHECK-NEXT: |-OMPCollapseClause 0x{{.*}} <col:17, col:27> // CHECK-NEXT: | `-ConstantExpr 0x{{.*}} <col:26> 'int' // CHECK-NEXT: | `-IntegerLiteral 0x{{.*}} <col:26> 'int' 2 // CHECK-NEXT: `-CapturedStmt 0x{{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt 0x{{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt 0x{{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt 0x{{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt 0x{{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl 0x{{.*}} <col:10, col:18> col:14 
used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator 0x{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr 0x{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:21> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr 0x{{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator 0x{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:28> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt 0x{{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt 0x{{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl 0x{{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator 0x{{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr 0x{{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:27> 'int' lvalue ParmVar 0x{{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator 0x{{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:30> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt 0x{{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl 0x{{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:31:9) *const restrict' // CHECK-NEXT: | |-VarDecl 0x{{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl 0x{{.*}} 
<line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl 0x{{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral 0x{{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr 0x{{.*}} <line:32:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr 0x{{.*}} <line:33:5> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr 0x{{.*}} <line:34:27> 'int' lvalue ParmVar 0x{{.*}} 'z' 'int'
fci_contract_nosym.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 *
 * Particle permutation symmetry for 2e Hamiltonian only
 * h2e[i,j,k,l] == h2e[k,l,i,j]
 * h2e[i,j,k,l] =/= h2e[j,i,k,l] =/= h2e[i,j,l,k] ...
 */

#include <stdlib.h>
#include <string.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"

#define MIN(X,Y)        ((X)<(Y)?(X):(Y))
// threshold below which the t1 intermediate is treated as zero and the
// dgemm + spread steps are skipped (see ctr_rhf2e_kern)
#define CSUMTHR         1e-28
// number of beta strings processed per block in FCIcontract_2es1
#define STRB_BLKSIZE    112

double FCI_t1ci_sf(double *ci0, double *t1, int bcount,
                   int stra_id, int strb_id,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   _LinkT *clink_indexa, _LinkT *clink_indexb);

/*
 * 1-electron contraction over the alpha strings (no permutation symmetry
 * assumed in h1e):  for every alpha string str0 and every packed excitation
 * (a, i, str1, sign) in its link table,
 *     ci1[str1, k] += sign * h1e[a, i] * ci0[str0, k]   for all beta k.
 * ci0/ci1 are row-major (nstra x nstrb); ci1 is accumulated into, not zeroed.
 * nlinkb/link_indexb are unused here (kept for a uniform signature).
 */
void FCIcontract_a_1e_nosym(double *h1e, double *ci0, double *ci1,
                            int norb, int nstra, int nstrb,
                            int nlinka, int nlinkb,
                            int *link_indexa, int *link_indexb)
{
        int j, k, i, a, sign;
        size_t str0, str1;
        double *pci0, *pci1;
        double tmp;
        _LinkT *tab;
        // compressed copy of the link table; freed before returning
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinka * nstra);
        FCIcompress_link(clink, link_indexa, norb, nstra, nlinka);

        for (str0 = 0; str0 < nstra; str0++) {
                tab = clink + str0 * nlinka;
                for (j = 0; j < nlinka; j++) {
                        a = EXTRACT_CRE (tab[j]); // propagate from t1 to bra, through a^+ i
                        i = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pci0 = ci0 + str0 * nstrb;
                        pci1 = ci1 + str1 * nstrb;
                        // hoist the string-independent factor out of the
                        // inner loop over beta strings
                        tmp = sign * h1e[a*norb+i];
                        for (k = 0; k < nstrb; k++) {
                                pci1[k] += tmp * pci0[k];
                        }
                }
        }
        free(clink);
}

/*
 * 1-electron contraction over the beta strings (no permutation symmetry
 * assumed in h1e):  for every beta string k of every alpha row str0,
 *     ci1[str0, str1] += sign * h1e[a, i] * ci0[str0, k]
 * with (a, i, str1, sign) taken from beta link table entry of string k.
 * nlinka/link_indexa are unused here (kept for a uniform signature).
 */
void FCIcontract_b_1e_nosym(double *h1e, double *ci0, double *ci1,
                            int norb, int nstra, int nstrb,
                            int nlinka, int nlinkb,
                            int *link_indexa, int *link_indexb)
{
        int j, k, i, a, sign;
        size_t str0, str1;
        double *pci1;
        double tmp;
        _LinkT *tab;
        // compressed copy of the link table; freed before returning
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinkb * nstrb);
        FCIcompress_link(clink, link_indexb, norb, nstrb, nlinkb);

        for (str0 = 0; str0 < nstra; str0++) {
                pci1 = ci1 + str0 * nstrb;
                for (k = 0; k < nstrb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = ci0[str0*nstrb+k];
                        for (j = 0; j < nlinkb; j++) {
                                a = EXTRACT_CRE (tab[j]);
                                i = EXTRACT_DES (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                pci1[str1] += sign * tmp * h1e[a*norb+i];
                        }
                }
        }
        free(clink);
}

/*
 * Scatter the t1 intermediate back into ci1 through the ALPHA link table of
 * string stra_id:  ci1[str1, strb_id + k] += sign * t1[k, a, i]  for
 * k = 0..bcount-1.  t1 is laid out as bcount consecutive (norb x norb)
 * tiles, hence the k*nnorb stride on cp0.
 */
static void spread_a_t1(double *ci1, double *t1,
                        int bcount, int stra_id, int strb_id,
                        int norb, int nstrb, int nlinka,
                        _LinkT *clink_indexa)
{
        ci1 += strb_id;
        const int nnorb = norb * norb;
        int j, k, i, a, str1, sign;
        const _LinkT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;
        for (j = 0; j < nlinka; j++) {
                a = EXTRACT_CRE (tab[j]);
                i = EXTRACT_DES (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + a*norb+i; // propagate from t1 to bra, through a^+ i
                cp1 = ci1 + str1*(size_t)nstrb;
                // split on the sign so the inner loop is a pure add/sub
                if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k*nnorb];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k*nnorb];
                        }
                }
        }
}

/*
 * Scatter the t1 intermediate back into ci1 through the BETA link tables:
 * for each of the bcount beta strings (starting at strb_id),
 *     ci1[stra_id, str1] += sign * t1[str0, a, i].
 * t1 and tab advance by one (norb x norb) tile / one link table per string.
 */
static void spread_b_t1(double *ci1, double *t1,
                        int bcount, int stra_id, int strb_id,
                        int norb, int nstrb, int nlinkb,
                        _LinkT *clink_indexb)
{
        const int nnorb = norb * norb;
        int j, i, a, str0, str1, sign;
        const _LinkT *tab = clink_indexb + strb_id * nlinkb;
        double *pci = ci1 + stra_id * (size_t)nstrb;
        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        a = EXTRACT_CRE (tab[j]);
                        i = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        // propagate from t1 to bra, through a^+ i
                        pci[str1] += sign * t1[a*norb+i];
                }
                t1 += nnorb;
                tab += nlinkb;
        }
}

/*
 * 2-electron contraction kernel for one alpha string stra_id and one block
 * of bcount beta strings starting at strb_id:
 *   1. build t1[k,a,i] = <str| a^+_a a_i |ci0> via FCI_t1ci_sf;
 *   2. vt1 = eri . t1 (one dgemm over the (norb*norb) index);
 *   3. spread vt1 back: beta part directly into ci1, alpha part into the
 *      thread-private ci1buf (merged later under a critical section).
 * Step 2/3 are skipped when the t1 magnitude (csum) is below CSUMTHR.
 * t1buf must hold at least 2*bcount*norb*norb doubles (t1 then vt1).
 */
static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount_for_spread_a, int ncol_ci1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkT *clink_indexa, _LinkT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        double csum;

        csum = FCI_t1ci_sf(ci0, t1, bcount, stra_id, strb_id,
                           norb, na, nb, nlinka, nlinkb,
                           clink_indexa, clink_indexb);
        if (csum > CSUMTHR) {
                // vt1[:,a,i] = sum_{b,j} eri[a,i,b,j] * t1[:,b,j]
                dgemm_(&TRANS_N, &TRANS_N, &nnorb, &bcount, &nnorb,
                       &D1, eri, &nnorb, t1, &nnorb,
                       &D0, vt1, &nnorb);
                spread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                            norb, nb, nlinkb, clink_indexb);
                spread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0,
                            norb, ncol_ci1buf, nlinka, clink_indexa);
        }
}

/*
 * out[i, 0:ni] += in[i, 0:ni] for i = 0..count-1, where out has row
 * stride no and in has row stride ni (merges a ci1buf block into ci1).
 */
static void axpy2d(double *out, double *in, int count, int no, int ni)
{
        int i, j;
        for (i = 0; i < count; i++) {
                for (j = 0; j < ni; j++) {
                        out[i*no+j] += in[i*ni+j];
                }
        }
}

/*
 * ci1 = contract(eri, ci0) for the spin-free 2e Hamiltonian with particle
 * permutation symmetry only (see the file header).  ci0/ci1 are (na x nb).
 * Parallelization: beta strings are processed in blocks of STRB_BLKSIZE;
 * within a block the alpha strings are divided among threads (omp for).
 * Each thread accumulates its alpha-spread contributions into a private
 * ci1buf, merged into ci1 under "omp critical"; the barrier keeps all
 * threads on the same beta block so ci1buf is not reused prematurely.
 */
void FCIcontract_2es1(double *eri, double *ci0, double *ci1,
                      int norb, int na, int nb, int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na);
        _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb);
        FCIcompress_link(clinka, link_indexa, norb, na, nlinka);
        FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb);
        memset(ci1, 0, sizeof(double)*na*nb);

#pragma omp parallel default(none) \
        shared(eri, ci0, ci1, norb, na, nb, nlinka, nlinkb, \
               clinka, clinkb)
{
        int strk, ib, blen;
        // per-thread scratch: t1buf holds t1 and vt1 (see ctr_rhf2e_kern),
        // ci1buf accumulates this thread's alpha-spread contributions
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*norb*2+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       blen, blen, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
                // merge the private buffer; serialized because every thread
                // writes into the same ci1 columns [ib, ib+blen)
#pragma omp critical
                axpy2d(ci1+ib, ci1buf, na, nb, blen);
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}
gather_mm.h
/*! * Copyright (c) 2022 by Contributors * \file array/cpu/gather_mm.h * \brief GATHER_MM CPU kernel function header. */ #ifndef DGL_ARRAY_CPU_GATHER_MM_H_ #define DGL_ARRAY_CPU_GATHER_MM_H_ #include <dgl/array.h> #include <dgl/bcast.h> #include <utility> namespace dgl { namespace aten { namespace cpu { template <typename DType> void transpose(const DType *in, DType *out, const int N, const int M) { #pragma omp parallel for for (int n = 0; n < N * M; n++) { int i = n / N; int j = n % N; out[n] = in[M * j + i]; } } template <typename DType> void matmul(const DType *A, const DType *B, DType *C, const int M, const int N, const int K) { #pragma omp parallel { int i, j, k; #pragma omp for for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { DType local_accum = 0; for (k = 0; k < K; k++) { local_accum += A[i * K + k] * B[k * N + j]; } C[i * N + j] = local_accum; } } } } /*! * \brief CPU kernel of Gather_mm. The input matrix A is expected to be * sorted according to relation type. * \param A The input dense matrix of dimension m x k * \param B The input dense matrix of dimension k x n * \param C The output dense matrix od dimension m x n * \param A_dim1_per_rel The number of rows in each relation in A * \param B_dim1_per_rel The number of rows in each relation in B * \param a_trans Matrix A to be transposed * \param b_trans Matrix B to be transposed */ template <int XPU, typename IdType, typename DType> void gatherMM_SortedEtype(const NDArray A, const NDArray B, NDArray C, const NDArray A_dim1_per_rel, const NDArray B_dim1_per_rel, bool a_trans, bool b_trans) { assert(A_dim1_per_rel.NumElements() == B_dim1_per_rel.NumElements()); int64_t num_rel = A_dim1_per_rel.NumElements(); const DType *A_data = A.Ptr<DType>(); const DType *B_data = B.Ptr<DType>(); const IdType* A_rel_data = A_dim1_per_rel.Ptr<IdType>(); const IdType* B_rel_data = B_dim1_per_rel.Ptr<IdType>(); DType *C_data = C.Ptr<DType>(); int64_t A_offset = 0, B_offset = 0, C_offset = 0; int64_t m, n, k, h_col, 
w_row; for (int etype = 0; etype < num_rel; ++etype) { assert((a_trans) ? A_rel_data[etype] : A->shape[1] == \ (b_trans) ? B->shape[1] : B_rel_data[etype]); m = A_rel_data[etype]; // rows of A n = B->shape[1]; // cols of B k = B_rel_data[etype]; // rows of B == cols of A NDArray A_trans, B_trans; if (a_trans) { A_trans = NDArray::Empty({m * k}, A->dtype, A->ctx); transpose<DType>(A_data + A_offset, static_cast<DType *>(A_trans->data), m, k); } if (b_trans) { B_trans = NDArray::Empty({k * n}, B->dtype, B->ctx); transpose<DType>(B_data + B_offset, static_cast<DType *>(B_trans->data), k, n); } if (a_trans || b_trans) { int64_t tmp = k; if (a_trans) std::swap(m, k); if (b_trans) { k = tmp; std::swap(n, k); } } matmul<DType>( (a_trans) ? static_cast<DType *>(A_trans->data) : A_data + A_offset, (b_trans) ? static_cast<DType *>(B_trans->data) : B_data + B_offset, C_data + C_offset, m, n, k); A_offset += m * k; B_offset += k * n; C_offset += m * n; } } } // namespace cpu } // namespace aten } // namespace dgl #endif // DGL_ARRAY_CPU_GATHER_MM_H_
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 16; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(32*t2-Nz-12,16)),t1);t3<=min(min(min(floord(Nt+Ny-4,16),floord(16*t1+Ny+29,16)),floord(32*t2+Ny+28,16)),floord(32*t1-32*t2+Nz+Ny+27,16));t3++) { for (t4=max(max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32)),ceild(16*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(16*t1+Nx+29,32)),floord(32*t2+Nx+28,32)),floord(16*t3+Nx+12,32)),floord(32*t1-32*t2+Nz+Nx+27,32));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),16*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),16*t3+14),32*t4+30),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(32*t4,t5+1); ubv=min(32*t4+31,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } 
} } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
task-two.c
/* * task-two.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> #include <unistd.h> #define NUM_THREADS 2 int main(int argc, char *argv[]) { int var = 0; int i; #pragma omp parallel for num_threads(NUM_THREADS) shared(var) \ schedule(static, 1) for (i = 0; i < NUM_THREADS; i++) { #pragma omp task shared(var) if (0) // the task is inlined an executed locally { var++; } } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-two.c:30 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-two.c:30 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
sync_progress.c
/*
 * Copyright (c) 2009, 2010, 2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <omp.h>
#include <arch/x86/barrelfish_kpi/asm_inlines_arch.h>
#include <barrelfish/barrelfish.h>
#include <string.h>

#undef GANG_SCHEDULING

// Length of one measurement interval in TSC ticks (read via rdtsc()).
#define PERIOD 2500000000UL
#define STACK_SIZE      (64 * 1024)
// Number of measurement intervals per thread.
#define ITERATIONS      120

extern uint64_t stuck[64];
// workcnt[thread][iteration]: barrier rounds completed by each thread in
// each interval; printed at the end as the progress measurement.
static uint64_t workcnt[32][ITERATIONS];

// Benchmark: measures how many OpenMP barrier rounds each thread completes
// per fixed TSC period on Barrelfish (libbomp backend). An argv[1] thread
// count triggers explicit domain spanning before the parallel region.
int main(int argc, char *argv[])
{
    int nthreads = omp_get_max_threads();
    if(argc == 2) {
        nthreads = atoi(argv[1]);
        // Barrelfish-specific setup: span the domain over `nthreads` cores
        // before initializing the custom bomp runtime.
        backend_span_domain(nthreads, STACK_SIZE);
        bomp_custom_init();
        omp_set_num_threads(nthreads);
    }

    printf("threads %d, CPUs %d\n", nthreads, omp_get_num_procs());

    // exittime[i] is the rendezvous protocol for interval i: thread 0
    // publishes the round number at which everyone should stop.
    volatile uint64_t exittime[ITERATIONS] = { 0 };

    // Do some work
#pragma omp parallel
    {
#ifdef GANG_SCHEDULING
        bomp_synchronize();
#endif
        for(int i = 0; i < ITERATIONS; i++) {
            uint64_t start = rdtsc();
            uint64_t workcn = 0;

            for(uint64_t n = 0;; n++) {
#pragma omp barrier
                workcn++;

                // Once the period has elapsed, thread 0 announces an exit
                // round a few barriers ahead (n + 3) — presumably so every
                // thread observes it and leaves on the same round; confirm
                // against the bomp barrier semantics.
                if(omp_get_thread_num() == 0 && exittime[i] == 0
                   && rdtsc() >= start + PERIOD) {
                    exittime[i] = n + 3;
                }

                if(exittime[i] != 0 && exittime[i] == n) {
                    n++;
                    break;
                }
            }

            /* char buf[64]; */
            /* sprintf(buf, "%d: %lu(%lu)\n", omp_get_thread_num(), workcn, */
            /*         stuck[omp_get_thread_num()]); */
            /* sys_print(buf, strlen(buf)); */
            /* stuck[omp_get_thread_num()] = 0; */
            workcnt[omp_get_thread_num()][i] = workcn;
        }
    }

    // Dump the per-interval counts, one row per interval, one column per
    // thread, via the raw Barrelfish console syscall.
    char buf[64];
    for(int i = 0; i < ITERATIONS; i++) {
        for(int n = 0; n < nthreads; n++) {
            sprintf(buf, "%lu ", workcnt[n][i]);
            sys_print(buf, strlen(buf));
        }
        sys_print("\n", 1);
    }

    /* sys_print("\n", 1); */

    /* char buf[128], buf1[128]; */
    /* sprintf(buf, "iterations in %lu ticks: ", PERIOD); */
    /* for(int i = 0; i < nthreads; i++) { */
    /*     sprintf(buf1, "%lu ", workcnt[i]); */
    /*     strcat(buf, buf1); */
    /* } */
    /* sprintf(buf1, "\n"); */
    /* strcat(buf, buf1); */
    /* sys_print(buf, strlen(buf)); */
    /* } */

    // Deliberate infinite spin — NOTE(review): presumably keeps the domain
    // alive so the output is not lost on this platform; confirm before
    // replacing with a normal exit.
    for(;;);
    return 0;
}
PerturbField.c
// Re-write of perturb_field.c for being accessible within the MCMC

/*
 * Move the matter of the initial-conditions density field to `redshift` using
 * the (1st/optionally 2nd order) Lagrangian displacement fields, and derive
 * the corresponding line-of-sight velocity field.
 *
 * Parameters:
 *   redshift        - target redshift for the perturbed field
 *   user_params     - run configuration (grid sizes, thread count, flags)
 *   cosmo_params    - cosmological parameters (broadcast to helper globals)
 *   boxes           - input initial conditions (density + displacement boxes)
 *   perturbed_field - output: density and velocity on the HII_DIM grid
 *
 * Returns 0 on success, or the caught error status from the Try/Catch
 * machinery. Grid indexing macros (R_INDEX, HII_R_FFT_INDEX, C_INDEX, ...)
 * and globals (global_params, DELTA_K, L_FACTOR, ...) are defined elsewhere
 * in the project.
 */
int ComputePerturbField(
    float redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params,
    struct InitialConditions *boxes, struct PerturbedField *perturbed_field
){
    /*
     ComputePerturbField uses the first-order Langragian displacement field to move the
     masses in the cells of the density field. The high-res density field is extrapolated
     to some high-redshift (global_params.INITIAL_REDSHIFT), then uses the zeldovich
     approximation to move the grid "particles" onto the lower-res grid we use for the
     maps. Then we recalculate the velocity fields on the perturbed grid.
    */

    int status;

    Try{ // This Try{} wraps the whole function, so we don't indent.

    // Makes the parameter structs visible to a variety of functions/macros
    // Do each time to avoid Python garbage collection issues
    Broadcast_struct_global_PS(user_params,cosmo_params);
    Broadcast_struct_global_UF(user_params,cosmo_params);

    omp_set_num_threads(user_params->N_THREADS);

    fftwf_complex *HIRES_density_perturb, *HIRES_density_perturb_saved;
    fftwf_complex *LOWRES_density_perturb, *LOWRES_density_perturb_saved;

    float growth_factor, displacement_factor_2LPT, init_growth_factor, init_displacement_factor_2LPT, xf, yf, zf;
    float mass_factor, dDdt, f_pixel_factor, velocity_displacement_factor, velocity_displacement_factor_2LPT;
    unsigned long long ct, HII_i, HII_j, HII_k;
    int i,j,k, xi, yi, zi, dimension, switch_mid;
    double ave_delta, new_ave_delta;

    // Function for deciding the dimensions of loops when we could
    // use either the low or high resolution grids.
    // NOTE(review): no default case — assumes PERTURB_ON_HIGH_RES is strictly
    // 0 or 1; any other value leaves `dimension`/`switch_mid` uninitialized.
    // Confirm that upstream validation guarantees this.
    switch(user_params->PERTURB_ON_HIGH_RES) {
        case 0:
            dimension = user_params->HII_DIM;
            switch_mid = HII_MIDDLE;
            break;
        case 1:
            dimension = user_params->DIM;
            switch_mid = MIDDLE;
            break;
    }

    // ***************   BEGIN INITIALIZATION   ************************** //

    // perform a very rudimentary check to see if we are underresolved and not using the linear approx
    if ((user_params->BOX_LEN > user_params->DIM) && !(global_params.EVOLVE_DENSITY_LINEARLY)){
        LOG_WARNING("Resolution is likely too low for accurate evolved density fields\n \
                It is recommended that you either increase the resolution (DIM/BOX_LEN) or set the EVOLVE_DENSITY_LINEARLY flag to 1\n");
    }

    growth_factor = dicke(redshift);
    displacement_factor_2LPT = -(3.0/7.0) * growth_factor*growth_factor; // 2LPT eq. D8

    dDdt = ddickedt(redshift); // time derivative of the growth factor (1/s)
    init_growth_factor = dicke(global_params.INITIAL_REDSHIFT);
    init_displacement_factor_2LPT = -(3.0/7.0) * init_growth_factor*init_growth_factor; // 2LPT eq. D8

    // find factor of HII pixel size / deltax pixel size
    f_pixel_factor = user_params->DIM/(float)(user_params->HII_DIM);
    mass_factor = pow(f_pixel_factor, 3);

    // allocate memory for the updated density, and initialize
    LOWRES_density_perturb = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
    LOWRES_density_perturb_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);

    if(user_params->PERTURB_ON_HIGH_RES) {
        HIRES_density_perturb = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
        HIRES_density_perturb_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
    }

    double *resampled_box;

    debugSummarizeIC(boxes, user_params->HII_DIM, user_params->DIM);
    LOG_SUPER_DEBUG("growth_factor=%f, displacemet_factor_2LPT=%f, dDdt=%f, init_growth_factor=%f, init_displacement_factor_2LPT=%f, mass_factor=%f",
                    growth_factor, displacement_factor_2LPT, dDdt, init_growth_factor, init_displacement_factor_2LPT, mass_factor);

    // check if the linear evolution flag was set
    if (global_params.EVOLVE_DENSITY_LINEARLY){
        LOG_DEBUG("Linearly evolve density field");

        // Linear case: simply scale the IC density by the growth factor.
#pragma omp parallel shared(growth_factor,boxes,LOWRES_density_perturb,HIRES_density_perturb,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<dimension; i++){
                for (j=0; j<dimension; j++){
                    for (k=0; k<dimension; k++){
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            *((float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k)) = growth_factor*boxes->hires_density[R_INDEX(i,j,k)];
                        }
                        else {
                            *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = growth_factor*boxes->lowres_density[HII_R_INDEX(i,j,k)];
                        }
                    }
                }
            }
        }
    }
    else {
        // Apply Zel'dovich/2LPT correction
        LOG_DEBUG("Apply Zel'dovich");

        // Zero the target grid; mass is accumulated into it below.
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<dimension; i++){
                for (j=0; j<dimension; j++){
                    for (k=0; k<dimension; k++){
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            *((float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k)) = 0.;
                        }
                        else {
                            *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = 0.;
                        }
                    }
                }
            }
        }

        velocity_displacement_factor = (growth_factor-init_growth_factor) / user_params->BOX_LEN;

        // now add the missing factor of D
        // (in-place rescale of the stored velocity boxes; undone further down)
#pragma omp parallel shared(boxes,velocity_displacement_factor,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<dimension; i++){
                for (j=0; j<dimension; j++){
                    for (k=0; k<dimension; k++){
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            boxes->hires_vx[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
                            boxes->hires_vy[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
                            boxes->hires_vz[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
                        }
                        else {
                            boxes->lowres_vx[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
                            boxes->lowres_vy[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
                            boxes->lowres_vz[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
                        }
                    }
                }
            }
        }

        // * ************************************************************************* * //
        // *                           BEGIN 2LPT PART                                 * //
        // * ************************************************************************* * //
        // reference: reference: Scoccimarro R., 1998, MNRAS, 299, 1097-1118 Appendix D
        if(user_params->USE_2LPT){
            LOG_DEBUG("Apply 2LPT");

            // allocate memory for the velocity boxes and read them in
            velocity_displacement_factor_2LPT = (displacement_factor_2LPT - init_displacement_factor_2LPT) / user_params->BOX_LEN;

            // now add the missing factor in eq. D9
#pragma omp parallel shared(boxes,velocity_displacement_factor_2LPT,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
            {
#pragma omp for
                for (i=0; i<dimension; i++){
                    for (j=0; j<dimension; j++){
                        for (k=0; k<dimension; k++){
                            if(user_params->PERTURB_ON_HIGH_RES) {
                                boxes->hires_vx_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
                                boxes->hires_vy_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
                                boxes->hires_vz_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
                            }
                            else {
                                boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
                                boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
                                boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
                            }
                        }
                    }
                }
            }
        }
        // * ************************************************************************* * //
        // *                            END 2LPT PART                                  * //
        // * ************************************************************************* * //

        // ************  END INITIALIZATION **************************** //

        // Perturbing the density field required adding over multiple cells. Store intermediate result as a double to avoid rounding errors
        if(user_params->PERTURB_ON_HIGH_RES) {
            resampled_box = (double *)calloc(TOT_NUM_PIXELS,sizeof(double));
        }
        else {
            resampled_box = (double *)calloc(HII_TOT_NUM_PIXELS,sizeof(double));
        }

        // go through the high-res box, mapping the mass onto the low-res (updated) box
        LOG_DEBUG("Perturb the density field");
#pragma omp parallel shared(init_growth_factor,boxes,f_pixel_factor,resampled_box,dimension) \
                        private(i,j,k,xi,xf,yi,yf,zi,zf,HII_i,HII_j,HII_k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<user_params->DIM;i++){
                for (j=0; j<user_params->DIM;j++){
                    for (k=0; k<user_params->DIM;k++){

                        // map indeces to locations in units of box size
                        xf = (i+0.5)/((user_params->DIM)+0.0);
                        yf = (j+0.5)/((user_params->DIM)+0.0);
                        zf = (k+0.5)/((user_params->DIM)+0.0);

                        // update locations
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            xf += (boxes->hires_vx)[R_INDEX(i, j, k)];
                            yf += (boxes->hires_vy)[R_INDEX(i, j, k)];
                            zf += (boxes->hires_vz)[R_INDEX(i, j, k)];
                        }
                        else {
                            HII_i = (unsigned long long)(i/f_pixel_factor);
                            HII_j = (unsigned long long)(j/f_pixel_factor);
                            HII_k = (unsigned long long)(k/f_pixel_factor);
                            xf += (boxes->lowres_vx)[HII_R_INDEX(HII_i, HII_j, HII_k)];
                            yf += (boxes->lowres_vy)[HII_R_INDEX(HII_i, HII_j, HII_k)];
                            zf += (boxes->lowres_vz)[HII_R_INDEX(HII_i, HII_j, HII_k)];
                        }

                        // 2LPT PART
                        // add second order corrections
                        if(user_params->USE_2LPT){
                            if(user_params->PERTURB_ON_HIGH_RES) {
                                xf -= (boxes->hires_vx_2LPT)[R_INDEX(i,j,k)];
                                yf -= (boxes->hires_vy_2LPT)[R_INDEX(i,j,k)];
                                zf -= (boxes->hires_vz_2LPT)[R_INDEX(i,j,k)];
                            }
                            else {
                                xf -= (boxes->lowres_vx_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
                                yf -= (boxes->lowres_vy_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
                                zf -= (boxes->lowres_vz_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
                            }
                        }

                        // Scale to grid units and wrap periodically.
                        xf *= (float)(dimension);
                        yf *= (float)(dimension);
                        zf *= (float)(dimension);
                        while (xf >= (float)(dimension)){ xf -= (dimension);}
                        while (xf < 0){ xf += (dimension);}
                        while (yf >= (float)(dimension)){ yf -= (dimension);}
                        while (yf < 0){ yf += (dimension);}
                        while (zf >= (float)(dimension)){ zf -= (dimension);}
                        while (zf < 0){ zf += (dimension);}
                        xi = xf;
                        yi = yf;
                        zi = zf;
                        if (xi >= (dimension)){ xi -= (dimension);}
                        if (xi < 0) {xi += (dimension);}
                        if (yi >= (dimension)){ yi -= (dimension);}
                        if (yi < 0) {yi += (dimension);}
                        if (zi >= (dimension)){ zi -= (dimension);}
                        if (zi < 0) {zi += (dimension);}

                        // Deposit this particle's mass at its displaced cell;
                        // atomic because different (i,j,k) can land in one cell.
                        if(user_params->PERTURB_ON_HIGH_RES) {
#pragma omp atomic
                            resampled_box[R_INDEX(xi,yi,zi)] += (double)(1. + init_growth_factor*(boxes->hires_density)[R_INDEX(i,j,k)]);
                        }
                        else {
#pragma omp atomic
                            resampled_box[HII_R_INDEX(xi,yi,zi)] += (double)(1. + init_growth_factor*(boxes->hires_density)[R_INDEX(i,j,k)]);
                        }
                    }
                }
            }
        }

        LOG_SUPER_DEBUG("resampled_box: ");
        debugSummarizeBoxDouble(resampled_box, dimension, "  ");

        // Resample back to a float for remaining algorithm
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,resampled_box,dimension) \
                        private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<dimension; i++){
                for (j=0; j<dimension; j++){
                    for (k=0; k<dimension; k++){
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            *( (float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k) ) = (float)resampled_box[R_INDEX(i,j,k)];
                        }
                        else {
                            *( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) = (float)resampled_box[HII_R_INDEX(i,j,k)];
                        }
                    }
                }
            }
        }
        free(resampled_box);
        LOG_DEBUG("Finished perturbing the density field");

        LOG_SUPER_DEBUG("density_perturb: ");
        if(user_params->PERTURB_ON_HIGH_RES){
            debugSummarizeBox(HIRES_density_perturb, dimension, "  ");
        }else{
            debugSummarizeBox(LOWRES_density_perturb, dimension, "  ");
        }

        // deallocate
        // (undo the in-place rescale of the velocity boxes done above, so the
        // caller's ICs are restored to z = 0 units)
#pragma omp parallel shared(boxes,velocity_displacement_factor,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<dimension; i++){
                for (j=0; j<dimension; j++){
                    for (k=0; k<dimension; k++){
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            boxes->hires_vx[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
                            boxes->hires_vy[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
                            boxes->hires_vz[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
                        }
                        else {
                            boxes->lowres_vx[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
                            boxes->lowres_vy[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
                            boxes->lowres_vz[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
                        }
                    }
                }
            }
        }

        if(user_params->USE_2LPT){
#pragma omp parallel shared(boxes,velocity_displacement_factor_2LPT,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
            {
#pragma omp for
                for (i=0; i<dimension; i++){
                    for (j=0; j<dimension; j++){
                        for (k=0; k<dimension; k++){
                            if(user_params->PERTURB_ON_HIGH_RES) {
                                boxes->hires_vx_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
                                boxes->hires_vy_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
                                boxes->hires_vz_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
                            }
                            else {
                                boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
                                boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
                                boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
                            }
                        }
                    }
                }
            }
        }
        LOG_DEBUG("Cleanup velocities for perturb");
    }

    // Now, if I still have the high resolution density grid (HIRES_density_perturb) I need to downsample it to the low-resolution grid
    if(user_params->PERTURB_ON_HIGH_RES) {

        LOG_DEBUG("Downsample the high-res perturbed density");

        // Transform to Fourier space to sample (filter) the box
        dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);

        // Need to save a copy of the high-resolution unfiltered density field for the velocities
        memcpy(HIRES_density_perturb_saved, HIRES_density_perturb, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);

        // Now filter the box
        if (user_params->DIM != user_params->HII_DIM) {
            filter_box(HIRES_density_perturb, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
        }

        // FFT back to real space
        dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);

        // Renormalise the FFT'd box
        // (divide by TOT_NUM_PIXELS, subtract 1 for overdensity, clamp at -1)
#pragma omp parallel shared(HIRES_density_perturb,LOWRES_density_perturb,f_pixel_factor,mass_factor) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<user_params->HII_DIM; i++){
                for (j=0; j<user_params->HII_DIM; j++){
                    for (k=0; k<user_params->HII_DIM; k++){
                        *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) =
                            *((float *)HIRES_density_perturb + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)))/(float)TOT_NUM_PIXELS;

                        *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) -= 1.;

                        if (*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) < -1) {
                            *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = -1.+FRACT_FLOAT_ERR;
                        }
                    }
                }
            }
        }
    }
    else {

        if (!global_params.EVOLVE_DENSITY_LINEARLY){
            // Non-linear low-res case: counts were accumulated; normalize the
            // deposited mass and convert to overdensity.
#pragma omp parallel shared(LOWRES_density_perturb,mass_factor) private(i,j,k) num_threads(user_params->N_THREADS)
            {
#pragma omp for
                for (i=0; i<user_params->HII_DIM; i++){
                    for (j=0; j<user_params->HII_DIM; j++){
                        for (k=0; k<user_params->HII_DIM; k++){
                            *( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) /= mass_factor;
                            *( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) -= 1.;
                        }
                    }
                }
            }
        }
    }

    LOG_SUPER_DEBUG("LOWRES_density_perturb: ");
    debugSummarizeBox(LOWRES_density_perturb, user_params->HII_DIM, "  ");

    // transform to k-space
    dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);

    //smooth the field
    if (!global_params.EVOLVE_DENSITY_LINEARLY && global_params.SMOOTH_EVOLVED_DENSITY_FIELD){
        filter_box(LOWRES_density_perturb, 1, 2, global_params.R_smooth_density*user_params->BOX_LEN/(float)user_params->HII_DIM);
    }

    LOG_SUPER_DEBUG("LOWRES_density_perturb after smoothing: ");
    debugSummarizeBox(LOWRES_density_perturb, user_params->HII_DIM, "  ");

    // save a copy of the k-space density field
    memcpy(LOWRES_density_perturb_saved, LOWRES_density_perturb, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);

    dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);

    LOG_SUPER_DEBUG("LOWRES_density_perturb back in real space: ");
    debugSummarizeBox(LOWRES_density_perturb, user_params->HII_DIM, "  ");

    // normalize after FFT
    int bad_count=0;
#pragma omp parallel shared(LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS) reduction(+: bad_count)
    {
#pragma omp for
        for(i=0; i<user_params->HII_DIM; i++){
            for(j=0; j<user_params->HII_DIM; j++){
                for(k=0; k<user_params->HII_DIM; k++){
                    *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) /= (float)HII_TOT_NUM_PIXELS;

                    if (*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) < -1.0) { // shouldn't happen

                        if(bad_count<5) LOG_WARNING("LOWRES_density_perturb is <-1 for index %d %d %d (value=%f)", i,j,k, *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)));
                        if(bad_count==5) LOG_WARNING("Skipping further warnings for LOWRES_density_perturb.");
                        *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = -1+FRACT_FLOAT_ERR;
                        bad_count++;
                    }
                }
            }
        }
    }
    if(bad_count>=5) LOG_WARNING("Total number of bad indices for LOW_density_perturb: %d", bad_count);
    LOG_SUPER_DEBUG("LOWRES_density_perturb back in real space (normalized): ");
    debugSummarizeBox(LOWRES_density_perturb, user_params->HII_DIM, "  ");

    // Copy the (padded FFT layout) buffer into the compact output array.
#pragma omp parallel shared(perturbed_field,LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS)
    {
#pragma omp for
        for (i=0; i<user_params->HII_DIM; i++){
            for (j=0; j<user_params->HII_DIM; j++){
                for (k=0; k<user_params->HII_DIM; k++){
                    *((float *)perturbed_field->density + HII_R_INDEX(i,j,k)) = *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k));
                }
            }
        }
    }

    // ****  Convert to velocities ***** //
    LOG_DEBUG("Generate velocity fields");

    float k_x, k_y, k_z, k_sq, dDdt_over_D;
    int n_x, n_y, n_z;

    dDdt_over_D = dDdt/growth_factor;

    if(user_params->PERTURB_ON_HIGH_RES) {
        // We are going to generate the velocity field on the high-resolution perturbed density grid
        memcpy(HIRES_density_perturb, HIRES_density_perturb_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
    }
    else {
        // We are going to generate the velocity field on the low-resolution perturbed density grid
        memcpy(LOWRES_density_perturb, LOWRES_density_perturb_saved, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
    }

    // Multiply each k-mode by dDdt/D * i*k_z / k^2 (and the FFT norm) to turn
    // the density modes into line-of-sight velocity modes.
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,dDdt_over_D,dimension,switch_mid) \
                        private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq) num_threads(user_params->N_THREADS)
    {
#pragma omp for
        for (n_x=0; n_x<dimension; n_x++){
            if (n_x>switch_mid)
                k_x =(n_x-dimension) * DELTA_K;  // wrap around for FFT convention
            else
                k_x = n_x * DELTA_K;

            for (n_y=0; n_y<dimension; n_y++){
                if (n_y>switch_mid)
                    k_y =(n_y-dimension) * DELTA_K;
                else
                    k_y = n_y * DELTA_K;

                for (n_z=0; n_z<=switch_mid; n_z++){
                    k_z = n_z * DELTA_K;

                    k_sq = k_x*k_x + k_y*k_y + k_z*k_z;

                    // now set the velocities
                    if ((n_x==0) && (n_y==0) && (n_z==0)) { // DC mode
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            HIRES_density_perturb[0] = 0;
                        }
                        else {
                            LOWRES_density_perturb[0] = 0;
                        }
                    }
                    else{
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            HIRES_density_perturb[C_INDEX(n_x,n_y,n_z)] *= dDdt_over_D*k_z*I/k_sq/(TOT_NUM_PIXELS+0.0);
                        }
                        else {
                            LOWRES_density_perturb[HII_C_INDEX(n_x,n_y,n_z)] *= dDdt_over_D*k_z*I/k_sq/(HII_TOT_NUM_PIXELS+0.0);
                        }
                    }
                }
            }
        }
    }

    if(user_params->PERTURB_ON_HIGH_RES) {

        // smooth the high resolution field ready for resampling
        if (user_params->DIM != user_params->HII_DIM)
            filter_box(HIRES_density_perturb, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));

        dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);

#pragma omp parallel shared(perturbed_field,HIRES_density_perturb,f_pixel_factor) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<user_params->HII_DIM; i++){
                for (j=0; j<user_params->HII_DIM; j++){
                    for (k=0; k<user_params->HII_DIM; k++){
                        *((float *)perturbed_field->velocity + HII_R_INDEX(i,j,k)) =
                            *((float *)HIRES_density_perturb + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)));
                    }
                }
            }
        }
    }
    else {

        dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);

#pragma omp parallel shared(perturbed_field,LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<user_params->HII_DIM; i++){
                for (j=0; j<user_params->HII_DIM; j++){
                    for (k=0; k<user_params->HII_DIM; k++){
                        *((float *)perturbed_field->velocity + HII_R_INDEX(i,j,k)) = *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k));
                    }
                }
            }
        }
    }

    fftwf_cleanup_threads();
    fftwf_cleanup();
    fftwf_forget_wisdom();

    // deallocate
    fftwf_free(LOWRES_density_perturb);
    fftwf_free(LOWRES_density_perturb_saved);
    if(user_params->PERTURB_ON_HIGH_RES) {
        fftwf_free(HIRES_density_perturb);
        fftwf_free(HIRES_density_perturb_saved);
    }
    fftwf_cleanup();

    } // End of Try{}
    Catch(status){
        return(status);
    }

    return(0);
}
residualbased_newton_raphson_strategy.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY // System includes // External includes // Project includes #include "includes/define.h" #include "solving_strategies/strategies/solving_strategy.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/builtin_timer.h" //default builder and solver #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonStrategy * @ingroup KratosCore * @brief This is the base Newton Raphson strategy * @details This strategy iterates until the convergence is achieved (or the maximum number of iterations is surpassed) using a Newton Raphson algorithm * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; // Counted pointer of ClassName KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedNewtonRaphsonStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; 
typedef typename BaseType::TSchemeType TSchemeType;

    //typedef typename BaseType::DofSetType DofSetType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;

    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor
     */
    explicit ResidualBasedNewtonRaphsonStrategy() : BaseType()
    {
    }

    /**
     * @brief Default constructor. (with parameters)
     * @param rModelPart The model part of the problem
     * @param ThisParameters The configuration parameters
     */
    explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart)
        : ResidualBasedNewtonRaphsonStrategy(rModelPart, ResidualBasedNewtonRaphsonStrategy::GetDefaultParameters())
    {
    }

    /**
     * @brief Default constructor. (with parameters)
     * @param rModelPart The model part of the problem
     * @param ThisParameters The configuration parameters
     */
    explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart, Parameters ThisParameters)
        : BaseType(rModelPart),
          mSolutionStepIsInitialized(false),
          mInitializeWasPerformed(false),
          mKeepSystemConstantDuringIterations(false)
    {
        // Validate and assign defaults
        // NOTE(review): the remaining member flags (reactions, reform-dofs, max iterations)
        // are presumably set inside AssignSettings — confirm against the base class.
        ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
        this->AssignSettings(ThisParameters);

        // Getting builder and solver
        auto p_builder_and_solver = GetBuilderAndSolver();
        if (p_builder_and_solver != nullptr) {
            // Tells to the builder and solver if the reactions have to be Calculated or not
            p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);

            // Tells to the Builder And Solver if the system matrix and vectors need to
            // be reshaped at each step or not
            p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
        } else {
            KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "BuilderAndSolver is not initialized. Please assign one before settings flags" << std::endl;
        }

        mpA = TSparseSpace::CreateEmptyMatrixPointer();
        mpDx = TSparseSpace::CreateEmptyVectorPointer();
        mpb = TSparseSpace::CreateEmptyVectorPointer();
    }

    /**
     * Default constructor
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    explicit ResidualBasedNewtonRaphsonStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        int MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false)
        : BaseType(rModelPart, MoveMeshFlag),
          mpScheme(pScheme),
          mpConvergenceCriteria(pNewConvergenceCriteria),
          mReformDofSetAtEachStep(ReformDofSetAtEachStep),
          mCalculateReactionsFlag(CalculateReactions),
          mSolutionStepIsInitialized(false),
          mMaxIterationNumber(MaxIterations),
          mInitializeWasPerformed(false),
          mKeepSystemConstantDuringIterations(false)
    {
        KRATOS_TRY;

        // Setting up the default builder and solver
        mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer(
            new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver));

        // Tells to the builder and solver if the reactions have to be Calculated or not
        mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag);

        // Tells to the Builder And Solver if the system matrix and vectors need to
        // be reshaped at each step or not
mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);

        // Set EchoLevel to the default value (only time is displayed)
        SetEchoLevel(1);

        // By default the matrices are rebuilt at each iteration
        this->SetRebuildLevel(2);

        mpA = TSparseSpace::CreateEmptyMatrixPointer();
        mpDx = TSparseSpace::CreateEmptyVectorPointer();
        mpb = TSparseSpace::CreateEmptyVectorPointer();

        KRATOS_CATCH("");
    }

    /**
     * @brief Constructor specifying the builder and solver
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param pNewBuilderAndSolver The builder and solver employed
     * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    explicit ResidualBasedNewtonRaphsonStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        int MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false)
        : BaseType(rModelPart, MoveMeshFlag),
          mpScheme(pScheme),
          mpBuilderAndSolver(pNewBuilderAndSolver),
          mpConvergenceCriteria(pNewConvergenceCriteria),
          mReformDofSetAtEachStep(ReformDofSetAtEachStep),
          mCalculateReactionsFlag(CalculateReactions),
          mSolutionStepIsInitialized(false),
          mMaxIterationNumber(MaxIterations),
          mInitializeWasPerformed(false),
          mKeepSystemConstantDuringIterations(false)
    {
        KRATOS_TRY

        // Getting builder and solver
        auto p_builder_and_solver = GetBuilderAndSolver();

        // Tells to the builder and solver if the reactions have to be Calculated or not
        p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);

        // Tells to the Builder And Solver if the system matrix and vectors need to
        //be reshaped at each step or not
        p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);

        // Set EchoLevel to the default value (only time is displayed)
        SetEchoLevel(1);

        // By default the matrices are rebuilt at each iteration
        this->SetRebuildLevel(2);

        mpA = TSparseSpace::CreateEmptyMatrixPointer();
        mpDx = TSparseSpace::CreateEmptyVectorPointer();
        mpb = TSparseSpace::CreateEmptyVectorPointer();

        KRATOS_CATCH("")
    }

    /**
     * @brief Constructor specifying the builder and solver
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param pNewBuilderAndSolver The builder and solver employed
     * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver")
    explicit ResidualBasedNewtonRaphsonStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        int MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false)
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag)
    {
        KRATOS_TRY

        KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl;

        // Getting builder and solver
        auto p_builder_and_solver = GetBuilderAndSolver();

        // We check if the linear solver considered for the builder and solver is consistent
        auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver();
        KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * Constructor with Parameters
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param Settings Settings used in the strategy
     */
    ResidualBasedNewtonRaphsonStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        Parameters Settings)
        : BaseType(rModelPart),
          mpScheme(pScheme),
          mpConvergenceCriteria(pNewConvergenceCriteria),
          mSolutionStepIsInitialized(false),
          mInitializeWasPerformed(false),
          mKeepSystemConstantDuringIterations(false)
    {
        KRATOS_TRY;

        // Setting up the default builder and solver
        mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer(
            new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver));

        // Tells to the builder and solver if the reactions have to be Calculated or not
        mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag);

        // Tells to the Builder And Solver if the system matrix and vectors need to
        // be reshaped at each step or not
        mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);

        // Set EchoLevel to the default value (only time is displayed)
        SetEchoLevel(1);

        // By default the matrices are
// rebuilt at each iteration
        this->SetRebuildLevel(2);

        mpA = TSparseSpace::CreateEmptyMatrixPointer();
        mpDx = TSparseSpace::CreateEmptyVectorPointer();
        mpb = TSparseSpace::CreateEmptyVectorPointer();

        KRATOS_CATCH("");
    }

    /**
     * @brief Constructor specifying the builder and solver and using Parameters
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param pNewBuilderAndSolver The builder and solver employed
     * @param Settings Settings used in the strategy
     */
    ResidualBasedNewtonRaphsonStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        Parameters Settings)
        : BaseType(rModelPart),
          mpScheme(pScheme),
          mpBuilderAndSolver(pNewBuilderAndSolver),
          mpConvergenceCriteria(pNewConvergenceCriteria),
          mSolutionStepIsInitialized(false),
          mInitializeWasPerformed(false),
          mKeepSystemConstantDuringIterations(false)
    {
        KRATOS_TRY

        // Validate and assign defaults
        Settings = this->ValidateAndAssignParameters(Settings, this->GetDefaultParameters());
        this->AssignSettings(Settings);

        // Getting builder and solver
        auto p_builder_and_solver = GetBuilderAndSolver();

        // Tells to the builder and solver if the reactions have to be Calculated or not
        p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);

        // Tells to the Builder And Solver if the system matrix and vectors need to
        //be reshaped at each step or not
        p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);

        // Set EchoLevel to the default value (only time is displayed)
        SetEchoLevel(1);

        // By default the matrices are rebuilt at each iteration
        this->SetRebuildLevel(2);

        mpA = TSparseSpace::CreateEmptyMatrixPointer();
        mpDx = TSparseSpace::CreateEmptyVectorPointer();
        mpb = TSparseSpace::CreateEmptyVectorPointer();

        KRATOS_CATCH("")
    }

    /**
     * @brief Constructor specifying the builder and solver and using Parameters
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param pNewBuilderAndSolver The builder and solver employed
     * @param Parameters Settings used in the strategy
     */
    KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver")
    ResidualBasedNewtonRaphsonStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        Parameters Settings)
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, Settings)
    {
        KRATOS_TRY

        KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl;

        // Getting builder and solver
        auto p_builder_and_solver = GetBuilderAndSolver();

        // We check if the linear solver considered for the builder and solver is consistent
        auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver();
        KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Destructor.
     * @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear().
*/
    ~ResidualBasedNewtonRaphsonStrategy() override
    {
        // If the linear solver has not been deallocated, clean it before
        // deallocating mpA. This prevents a memory error with the ML
        // solver (which holds a reference to it).
        // NOTE: The linear solver is held by the B&S
        auto p_builder_and_solver = this->GetBuilderAndSolver();
        if (p_builder_and_solver != nullptr) {
            p_builder_and_solver->Clear();
        }

        // Deallocating system vectors to avoid errors in MPI. Clear calls
        // TrilinosSpace::Clear for the vectors, which preserves the Map of
        // current vectors, performing MPI calls in the process. Due to the
        // way Python garbage collection works, this may happen after
        // MPI_Finalize has already been called and is an error. Resetting
        // the pointers here prevents Clear from operating with the
        // (now deallocated) vectors.
        mpA.reset();
        mpDx.reset();
        mpb.reset();

        Clear();
    }

    /**
     * @brief Set method for the time scheme
     * @param pScheme The pointer to the time scheme considered
     */
    void SetScheme(typename TSchemeType::Pointer pScheme)
    {
        mpScheme = pScheme;
    };

    /**
     * @brief Get method for the time scheme
     * @return mpScheme: The pointer to the time scheme considered
     */
    typename TSchemeType::Pointer GetScheme()
    {
        return mpScheme;
    };

    /**
     * @brief Set method for the builder and solver
     * @param pNewBuilderAndSolver The pointer to the builder and solver considered
     */
    void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver)
    {
        mpBuilderAndSolver = pNewBuilderAndSolver;
    };

    /**
     * @brief Get method for the builder and solver
     * @return mpBuilderAndSolver: The pointer to the builder and solver considered
     */
    typename TBuilderAndSolverType::Pointer GetBuilderAndSolver()
    {
        return mpBuilderAndSolver;
    };

    /**
     * @brief This method sets the flag mInitializeWasPerformed
     * @param InitializePerformedFlag The flag that tells if the initialize has been computed
     */
    void SetInitializePerformedFlag(bool InitializePerformedFlag = true)
    {
        mInitializeWasPerformed = InitializePerformedFlag;
    }

    /**
     * @brief This method gets the flag mInitializeWasPerformed
     * @return mInitializeWasPerformed: The flag that tells if the initialize has been computed
     */
    bool GetInitializePerformedFlag()
    {
        return mInitializeWasPerformed;
    }

    /**
     * @brief This method sets the flag mCalculateReactionsFlag
     * @param CalculateReactionsFlag The flag that tells if the reactions are computed
     */
    void SetCalculateReactionsFlag(bool CalculateReactionsFlag)
    {
        mCalculateReactionsFlag = CalculateReactionsFlag;
    }

    /**
     * @brief This method returns the flag mCalculateReactionsFlag
     * @return The flag that tells if the reactions are computed
     */
    bool GetCalculateReactionsFlag()
    {
        return mCalculateReactionsFlag;
    }

    /**
     * @brief This method sets the flag mFullUpdateFlag
     * @param UseOldStiffnessInFirstIterationFlag The flag that tells if the old stiffness is used in the first iteration
     */
    void SetUseOldStiffnessInFirstIterationFlag(bool UseOldStiffnessInFirstIterationFlag)
    {
        mUseOldStiffnessInFirstIteration = UseOldStiffnessInFirstIterationFlag;
    }

    /**
     * @brief This method returns the flag mFullUpdateFlag
     * @return The flag that tells if the old stiffness is used in the first iteration
     */
    bool GetUseOldStiffnessInFirstIterationFlag()
    {
        return mUseOldStiffnessInFirstIteration;
    }

    /**
     * @brief This method sets the flag mReformDofSetAtEachStep
     * @param Flag The flag that tells if each time step the system is rebuilt
     */
    void SetReformDofSetAtEachStepFlag(bool Flag)
    {
        mReformDofSetAtEachStep = Flag;
        GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
    }

    /**
     * @brief This method returns the flag mReformDofSetAtEachStep
     * @return The flag that tells if each time step the system is rebuilt
     */
    bool GetReformDofSetAtEachStepFlag()
    {
        return mReformDofSetAtEachStep;
    }

    /**
     * @brief This method sets the flag mMaxIterationNumber
     * @param MaxIterationNumber This is the maximum number of on linear iterations
     */
    void SetMaxIterationNumber(unsigned int MaxIterationNumber)
    {
        mMaxIterationNumber = MaxIterationNumber;
    }

    /**
     * @brief This method gets the flag mMaxIterationNumber
     * @return mMaxIterationNumber: This is the maximum number of on linear iterations
     */
    unsigned int GetMaxIterationNumber()
    {
        return mMaxIterationNumber;
    }

    /**
     * @brief It sets the level of echo for the solving strategy
     * @param Level The level to set
     * @details The different levels of echo are:
     * - 0: Mute... no echo at all
     * - 1: Printing time and basic informations
     * - 2: Printing linear solver data
     * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b...
     */
    void SetEchoLevel(int Level) override
    {
        BaseType::mEchoLevel = Level;
        GetBuilderAndSolver()->SetEchoLevel(Level);
    }

    //*********************************************************************************
    /**OPERATIONS ACCESSIBLE FROM THE INPUT: **/

    /**
     * @brief Create method
     * @param rModelPart The model part of the problem
     * @param ThisParameters The configuration parameters
     */
    typename BaseType::Pointer Create(
        ModelPart& rModelPart,
        Parameters ThisParameters
        ) const override
    {
        return Kratos::make_shared<ClassType>(rModelPart, ThisParameters);
    }

    /**
     * @brief Operation to predict the solution ...
if it is not called a trivial predictor is used in which the values of the solution step of interest are assumed equal to the old values
     */
    void Predict() override
    {
        KRATOS_TRY
        const DataCommunicator &r_comm = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator();
        //OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions
        //if the operations needed were already performed this does nothing
        if (mInitializeWasPerformed == false)
            Initialize();

        //initialize solution step
        if (mSolutionStepIsInitialized == false)
            InitializeSolutionStep();

        TSystemMatrixType& rA = *mpA;
        TSystemVectorType& rDx = *mpDx;
        TSystemVectorType& rb = *mpb;

        DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet();

        GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);

        // Applying constraints if needed
        auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints();
        const int local_number_of_constraints = r_constraints_array.size();
        const int global_number_of_constraints = r_comm.SumAll(local_number_of_constraints);
        if(global_number_of_constraints != 0) {
            const auto& r_process_info = BaseType::GetModelPart().GetProcessInfo();

            const auto it_const_begin = r_constraints_array.begin();

            #pragma omp parallel for
            for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
                (it_const_begin + i)->ResetSlaveDofs(r_process_info);

            #pragma omp parallel for
            for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
                (it_const_begin + i)->Apply(r_process_info);

            // The following is needed since we need to eventually compute time derivatives after applying
            // Master slave relations
            TSparseSpace::SetToZero(rDx);
            this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
        }

        // Move the mesh if needed
        if (this->MoveMeshFlag() == true)
            BaseType::MoveMesh();

        KRATOS_CATCH("")
    }

    /**
     * @brief Initialization of member variables and prior operations
     */
    void Initialize() override
    {
        KRATOS_TRY;

        if (mInitializeWasPerformed == false)
        {
            //pointers needed in the solution
            typename TSchemeType::Pointer p_scheme = GetScheme();
            typename TConvergenceCriteriaType::Pointer p_convergence_criteria = mpConvergenceCriteria;

            //Initialize The Scheme - OPERATIONS TO BE DONE ONCE
            if (p_scheme->SchemeIsInitialized() == false)
                p_scheme->Initialize(BaseType::GetModelPart());

            //Initialize The Elements - OPERATIONS TO BE DONE ONCE
            if (p_scheme->ElementsAreInitialized() == false)
                p_scheme->InitializeElements(BaseType::GetModelPart());

            //Initialize The Conditions - OPERATIONS TO BE DONE ONCE
            if (p_scheme->ConditionsAreInitialized() == false)
                p_scheme->InitializeConditions(BaseType::GetModelPart());

            //initialisation of the convergence criteria
            if (p_convergence_criteria->IsInitialized() == false)
                p_convergence_criteria->Initialize(BaseType::GetModelPart());

            mInitializeWasPerformed = true;
        }

        KRATOS_CATCH("");
    }

    /**
     * @brief Clears the internal storage
     */
    void Clear() override
    {
        KRATOS_TRY;

        // Setting to zero the internal flag to ensure that the dof sets are recalculated.
        // Also clear the linear solver stored in the B&S
        auto p_builder_and_solver = GetBuilderAndSolver();
        if (p_builder_and_solver != nullptr) {
            p_builder_and_solver->SetDofSetIsInitializedFlag(false);
            p_builder_and_solver->Clear();
        }

        // Clearing the system of equations
        if (mpA != nullptr)
            SparseSpaceType::Clear(mpA);
        if (mpDx != nullptr)
            SparseSpaceType::Clear(mpDx);
        if (mpb != nullptr)
            SparseSpaceType::Clear(mpb);

        // Clearing scheme
        auto p_scheme = GetScheme();
        if (p_scheme != nullptr) {
            GetScheme()->Clear();
        }

        mInitializeWasPerformed = false;
        mSolutionStepIsInitialized = false;

        KRATOS_CATCH("");
    }

    /**
     * @brief This should be considered as a "post solution" convergence check which is useful for coupled analysis - the convergence criteria used is the one used inside the "solve" step
     */
    bool IsConverged() override
    {
        KRATOS_TRY;

        TSystemMatrixType& rA = *mpA;
        TSystemVectorType& rDx = *mpDx;
        TSystemVectorType& rb = *mpb;

        if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
        {
            TSparseSpace::SetToZero(rb);
            GetBuilderAndSolver()->BuildRHS(GetScheme(), BaseType::GetModelPart(), rb);
        }

        return mpConvergenceCriteria->PostCriteria(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);

        KRATOS_CATCH("");
    }

    /**
     * @brief This operations should be called before printing the results when non trivial results
     * (e.g. stresses)
     * Need to be calculated given the solution of the step
     * @details This operations should be called only when needed, before printing as it can involve a non
     * negligible cost
     */
    void CalculateOutputData() override
    {
        TSystemMatrixType& rA = *mpA;
        TSystemVectorType& rDx = *mpDx;
        TSystemVectorType& rb = *mpb;

        GetScheme()->CalculateOutputData(BaseType::GetModelPart(),
                                         GetBuilderAndSolver()->GetDofSet(),
                                         rA, rDx, rb);
    }

    /**
     * @brief Performs all the required operations that should be done (for each step) before solving the solution step.
     * @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
    void InitializeSolutionStep() override
    {
        KRATOS_TRY;

        if (!mSolutionStepIsInitialized) {
            // Pointers needed in the solution
            typename TSchemeType::Pointer p_scheme = GetScheme();
            typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
            ModelPart& r_model_part = BaseType::GetModelPart();

            //set up the system, operation performed just once unless it is required
            //to reform the dof set at each iteration
            BuiltinTimer system_construction_time;
            if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false ||
                mReformDofSetAtEachStep == true)
            {
                //setting up the list of the DOFs to be solved
                BuiltinTimer setup_dofs_time;
                p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part);
                KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0)
                    << "Setup Dofs Time: " << setup_dofs_time.ElapsedSeconds() << std::endl;

                //shaping correctly the system
                BuiltinTimer setup_system_time;
                p_builder_and_solver->SetUpSystem(r_model_part);
                KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0)
                    << "Setup System Time: " << setup_system_time.ElapsedSeconds() << std::endl;

                //setting up the Vectors involved to the correct size
                BuiltinTimer system_matrix_resize_time;
                p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb, r_model_part);
                KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0)
                    << "System Matrix Resize Time: " << system_matrix_resize_time.ElapsedSeconds() << std::endl;
            }

            KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0)
                << "System Construction Time: " << system_construction_time.ElapsedSeconds() << std::endl;

            TSystemMatrixType& rA = *mpA;
            TSystemVectorType& rDx = *mpDx;
            TSystemVectorType& rb = *mpb;

            // Initial operations ... things that are constant over the Solution Step
            p_builder_and_solver->InitializeSolutionStep(r_model_part, rA, rDx, rb);

            // Initial operations ... things that are constant over the Solution Step
            p_scheme->InitializeSolutionStep(r_model_part, rA, rDx, rb);

            // Initialisation of the convergence criteria
            if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
            {
                TSparseSpace::SetToZero(rb);
                p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
            }

            mpConvergenceCriteria->InitializeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);

            if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
                TSparseSpace::SetToZero(rb);

            mSolutionStepIsInitialized = true;
        }

        KRATOS_CATCH("");
    }

    /**
     * @brief Performs all the required operations that should be done (for each step) after solving the solution step.
     * @details A member variable should be used as a flag to make sure this function is called only once per step.
     */
    void FinalizeSolutionStep() override
    {
        KRATOS_TRY;

        ModelPart& r_model_part = BaseType::GetModelPart();

        typename TSchemeType::Pointer p_scheme = GetScheme();
        typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();

        TSystemMatrixType& rA = *mpA;
        TSystemVectorType& rDx = *mpDx;
        TSystemVectorType& rb = *mpb;

        //Finalisation of the solution step,
        //operations to be done after achieving convergence, for example the
        //Final Residual Vector (mb) has to be saved in there
        //to avoid error accumulation

        p_scheme->FinalizeSolutionStep(r_model_part, rA, rDx, rb);
        p_builder_and_solver->FinalizeSolutionStep(r_model_part, rA, rDx, rb);
        mpConvergenceCriteria->FinalizeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);

        //Cleaning memory after the solution
        p_scheme->Clean();

        //reset flags for next step
        mSolutionStepIsInitialized = false;

        if (mReformDofSetAtEachStep == true) //deallocate the systemvectors
        {
            this->Clear();
        }

        KRATOS_CATCH("");
    }

    /**
     * @brief Solves the current step. This function returns true if a solution has been found, false otherwise.
*/
    bool SolveSolutionStep() override
    {
        // Pointers needed in the solution
        ModelPart& r_model_part = BaseType::GetModelPart();
        typename TSchemeType::Pointer p_scheme = GetScheme();
        typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
        auto& r_dof_set = p_builder_and_solver->GetDofSet();

        TSystemMatrixType& rA = *mpA;
        TSystemVectorType& rDx = *mpDx;
        TSystemVectorType& rb = *mpb;

        //initializing the parameters of the Newton-Raphson cycle
        unsigned int iteration_number = 1;
        r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
        bool residual_is_updated = false;
        p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
        mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
        bool is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);

        // Function to perform the building and the solving phase.
        if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false) {
            TSparseSpace::SetToZero(rA);
            TSparseSpace::SetToZero(rDx);
            TSparseSpace::SetToZero(rb);

            if (mUseOldStiffnessInFirstIteration){
                p_builder_and_solver->BuildAndSolveLinearizedOnPreviousIteration(p_scheme, r_model_part, rA, rDx, rb,BaseType::MoveMeshFlag());
            } else {
                p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
            }
        } else {
            TSparseSpace::SetToZero(rDx); // Dx = 0.00;
            TSparseSpace::SetToZero(rb);

            p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
        }

        // Debugging info
        EchoInfo(iteration_number);

        // Updating the results stored in the database
        UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag());

        p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
        mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

        if (is_converged) {
            if (mpConvergenceCriteria->GetActualizeRHSflag()) {
                TSparseSpace::SetToZero(rb);

                p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
            }

            is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
        }

        //Iteration Cycle... performed only for NonLinearProblems
        while (is_converged == false &&
               iteration_number++ < mMaxIterationNumber)
        {
            //setting the number of iteration
            r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;

            p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
            mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

            is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);

            //call the linear system solver to find the correction mDx for the
            //it is not called if there is no system to solve
            if (SparseSpaceType::Size(rDx) != 0)
            {
                if (BaseType::mRebuildLevel > 1 || BaseType::mStiffnessMatrixIsBuilt == false)
                {
                    if (GetKeepSystemConstantDuringIterations() == false)
                    {
                        //A = 0.00;
                        TSparseSpace::SetToZero(rA);
                        TSparseSpace::SetToZero(rDx);
                        TSparseSpace::SetToZero(rb);

                        p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                    }
                    else
                    {
                        TSparseSpace::SetToZero(rDx);
                        TSparseSpace::SetToZero(rb);

                        p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                    }
                }
                else
                {
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);

                    p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                }
            }
            else
            {
                KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl;
            }

            // Debugging info
            EchoInfo(iteration_number);

            // Updating the results stored in the database
            UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag());

            p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
            mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

            residual_is_updated = false;

            if (is_converged == true)
            {
                if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
                {
                    TSparseSpace::SetToZero(rb);

                    p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
                    residual_is_updated = true;
                }

                is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
            }
        }

        //plots a warning if the maximum number of iterations is exceeded
        if (iteration_number >= mMaxIterationNumber) {
            MaxIterationsExceeded();
        } else {
            KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0)
                << "Convergence achieved after " << iteration_number << " / "
                << mMaxIterationNumber << " iterations" << std::endl;
        }

        //recalculate residual if needed
        //(note that some convergence criteria need it to be recalculated)
        if (residual_is_updated == false)
        {
            // NOTE:
            // The following part will be commented because it is time consuming
            // and there is no obvious reason to be here. If someone need this
            // part please notify the community via mailing list before uncommenting it.
            // Pooyan.

            //    TSparseSpace::SetToZero(mb);
            //    p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
        }

        //calculate reactions if required
        if (mCalculateReactionsFlag == true)
            p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

        return is_converged;
    }

    /**
     * @brief Function to perform expensive checks.
     * @details It is designed to be called ONCE to verify that the input is correct.
 */
int Check() override
{
    KRATOS_TRY

    // Delegate to the base strategy and every collaborator
    BaseType::Check();
    GetBuilderAndSolver()->Check(BaseType::GetModelPart());
    GetScheme()->Check(BaseType::GetModelPart());
    mpConvergenceCriteria->Check(BaseType::GetModelPart());

    return 0;

    KRATOS_CATCH("")
}

/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
 * @return The default parameters
 */
Parameters GetDefaultParameters() const override
{
    Parameters default_parameters = Parameters(R"(
    {
        "name"                                 : "newton_raphson_strategy",
        "use_old_stiffness_in_first_iteration" : false,
        "max_iteration"                        : 10,
        "reform_dofs_at_each_step"             : false,
        "compute_reactions"                    : false,
        "builder_and_solver_settings"          : {},
        "convergence_criteria_settings"        : {},
        "linear_solver_settings"               : {},
        "scheme_settings"                      : {}
    })");

    // Getting base class default parameters
    const Parameters base_default_parameters = BaseType::GetDefaultParameters();
    default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
    return default_parameters;
}

/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @return The name of the class
 */
static std::string Name()
{
    return "newton_raphson_strategy";
}

///@}
///@name Operators
///@{

///@}
///@name Operations
///@{

///@}
///@name Access
///@{

/**
 * @brief This method returns the LHS matrix
 * @return The LHS matrix
 */
TSystemMatrixType &GetSystemMatrix() override
{
    TSystemMatrixType &mA = *mpA;
    return mA;
}

/**
 * @brief This method returns the RHS vector
 * @return The RHS vector
 */
TSystemVectorType& GetSystemVector() override
{
    TSystemVectorType& mb = *mpb;
    return mb;
}

/**
 * @brief This method returns the solution vector
 * @return The Dx vector
 */
TSystemVectorType& GetSolutionVector() override
{
    TSystemVectorType& mDx = *mpDx;
    return mDx;
}

/**
 * @brief Set method for the flag mKeepSystemConstantDuringIterations
 * @param Value If we consider constant the system of equations during the iterations
 */
void
SetKeepSystemConstantDuringIterations(bool Value)
{
    mKeepSystemConstantDuringIterations = Value;
}

/**
 * @brief Get method for the flag mKeepSystemConstantDuringIterations
 * @return True if we consider constant the system of equations during the iterations, false otherwise
 */
bool GetKeepSystemConstantDuringIterations()
{
    return mKeepSystemConstantDuringIterations;
}

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

/// Turn back information as a string.
std::string Info() const override
{
    return "ResidualBasedNewtonRaphsonStrategy";
}

/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}

/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
    rOStream << Info();
}

///@}
///@name Friends
///@{

///@}
private:
// NOTE(review): this section is labeled "Protected" but sits under
// `private:`; the actual `protected:` members follow below.
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}
protected:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

typename TSchemeType::Pointer mpScheme = nullptr; /// The pointer to the time scheme employed
typename TBuilderAndSolverType::Pointer mpBuilderAndSolver = nullptr; /// The pointer to the builder and solver employed
typename TConvergenceCriteriaType::Pointer mpConvergenceCriteria = nullptr; /// The pointer to the convergence criteria employed
TSystemVectorPointerType mpDx; /// The increment in the solution
TSystemVectorPointerType mpb; /// The RHS vector of the system of equations
TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations

/**
 * @brief Flag telling if it is needed to reform the DofSet at each solution step or if it is possible to form it just once
 * @details Default = false
 * - true  : Reforme at each time step
 * - false : Form just one (more
efficient)
 */
bool mReformDofSetAtEachStep;

/**
 * @brief Flag telling if it is needed or not to compute the reactions
 * @details default = true
 */
bool mCalculateReactionsFlag;

/**
 * @brief Flag telling if a full update of the database will be performed at the first iteration
 * @details default = false
 */
bool mUseOldStiffnessInFirstIteration = false;

bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step

unsigned int mMaxIterationNumber; /// The maximum number of iterations, 30 by default

bool mInitializeWasPerformed; /// Flag to set as initialized the strategy

bool mKeepSystemConstantDuringIterations; // Flag to allow keeping system matrix constant during iterations

///@}
///@name Private Operators
///@{

/**
 * @brief Here the database is updated
 * @param A The LHS matrix of the system of equations
 * @param Dx The incremement in the solution
 * @param b The RHS vector of the system of equations
 * @param MoveMesh The flag that allows to move the mesh
 */
virtual void UpdateDatabase(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    const bool MoveMesh)
{
    typename TSchemeType::Pointer p_scheme = GetScheme();
    typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();

    // The scheme applies Dx to the nodal database
    p_scheme->Update(BaseType::GetModelPart(), p_builder_and_solver->GetDofSet(), rA, rDx, rb);

    // Move the mesh if needed
    if (MoveMesh == true)
        BaseType::MoveMesh();
}

/**
 * @brief This method returns the components of the system of equations depending of the echo level
 * @param IterationNumber The non linear iteration in the solution loop
 */
virtual void EchoInfo(const unsigned int IterationNumber)
{
    TSystemMatrixType& rA = *mpA;
    TSystemVectorType& rDx = *mpDx;
    TSystemVectorType& rb = *mpb;

    if (this->GetEchoLevel() == 2) //if it is needed to print the debug info
    {
        KRATOS_INFO("Dx")  << "Solution obtained = " << rDx << std::endl;
        KRATOS_INFO("RHS") << "RHS = " << rb << std::endl;
    }
    else if (this->GetEchoLevel() == 3) //if it is needed to print the debug info
    {
        KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl;
        KRATOS_INFO("Dx")  << "Solution obtained = " << rDx << std::endl;
        KRATOS_INFO("RHS") << "RHS = " << rb << std::endl;
    }
    else if (this->GetEchoLevel() == 4) //print to matrix market file
    {
        // File names are tagged with the current TIME and iteration number
        std::stringstream matrix_market_name;
        matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm";
        TSparseSpace::WriteMatrixMarketMatrix((char *)(matrix_market_name.str()).c_str(), rA, false);

        std::stringstream matrix_market_vectname;
        matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm.rhs";
        TSparseSpace::WriteMatrixMarketVector((char *)(matrix_market_vectname.str()).c_str(), rb);
    }
}

/**
 * @brief This method prints information after reach the max number of iterations
 */
virtual void MaxIterationsExceeded()
{
    KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0)
        << "ATTENTION: max iterations ( " << mMaxIterationNumber << " ) exceeded!" << std::endl;
}

/**
 * @brief This method assigns settings to member variables
 * @param ThisParameters Parameters that are assigned to the member variables
 */
void AssignSettings(const Parameters ThisParameters) override
{
    BaseType::AssignSettings(ThisParameters);
    mMaxIterationNumber = ThisParameters["max_iteration"].GetInt();
    mReformDofSetAtEachStep = ThisParameters["reform_dofs_at_each_step"].GetBool();
    mCalculateReactionsFlag = ThisParameters["compute_reactions"].GetBool();
    mUseOldStiffnessInFirstIteration = ThisParameters["use_old_stiffness_in_first_iteration"].GetBool();

    // Saving the convergence criteria to be used
    if (ThisParameters["convergence_criteria_settings"].Has("name")) {
        KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
    }

    // Saving the scheme
    if (ThisParameters["scheme_settings"].Has("name")) {
        KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
    }

    // Setting up the default builder and solver
    if (ThisParameters["builder_and_solver_settings"].Has("name")) {
        KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
    }
}

///@}
///@name Private Operations
///@{

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

/**
 * Copy constructor.
 */
ResidualBasedNewtonRaphsonStrategy(const ResidualBasedNewtonRaphsonStrategy &Other){};

///@}

}; /* Class ResidualBasedNewtonRaphsonStrategy */

///@}

///@name Type Definitions
///@{

///@}

} /* namespace Kratos. */

#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY defined */
testing_dgeqrf.c
/** * * @file example_dpotrf.c * * PLASMA testing routines * PLASMA is a software package provided by Univ. of Tennessee, * Univ. of California Berkeley and Univ. of Colorado Denver * * @brief Example of Cholesky factorization * * @version 2.6.0 * @author Bilel Hadri * @date 2010-11-15 * @generated d Tue Jan 7 11:45:20 2014 * **/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <plasma.h> #include <cblas.h> #include <lapacke.h> #include <core_blas.h> #include "testing_dmain.h" static int check_factorization(int, int, double*, double*, int); static void GENMAT_SYM_FULL(int m, int n, double *A) { srand48(time(NULL)); int j; for (j = 0; j < m; ++j ) { int i; for( i = j; i < n; ++i ) { double dran = drand48(); A[j*m+i] = A[i*m+j] = dran; } } for(j = 0; j < m; ++j) A[j*m+j] += 10 * m; } int testing_dgeqrf(int argc, char **argv) { int M = 1000; int N = 1000; int LDA = 1000; int info_factorization; double *A1 = (double *)malloc(LDA*N*sizeof(double)); double *A2 = (double *)malloc(LDA*N*sizeof(double)); #pragma omp register ([LDA*N]A2) /* Check if unable to allocate memory */ if ((!A1)||(!A2)){ printf("Out of Memory \n "); return EXIT_SUCCESS; } /* Initialize A1 and A2 for Symmetric Positive Matrix */ GENMAT_SYM_FULL(LDA, N, A1); int i; for(i = 0; i < N*LDA; ++i){ A2[i] = A1[i]; } /* Plasma routines */ PLASMA_desc *T; PLASMA_Alloc_Workspace_dgels(M, N, &T); PLASMA_dgeqrf(M, N, A2, LDA, T); /* Check the factorization */ info_factorization = check_factorization( M, N, A1, A2, LDA); if ( info_factorization != 0 ) printf("-- Error in DPOTRF example ! \n"); else printf("-- Run of DPOTRF example successful ! 
\n"); free(A1); free(A2); return 0; } static int check_factorization(int M, int N, double *A1, double *A2, int LDA) { double Anorm, Rnorm; double Anorm1, Rnorm1; double alpha; int info_factorization; int i,j; double eps; eps = LAPACKE_dlamch_work('e'); // double *Residual = (double *)malloc(M*N*sizeof(double)); // double *L1 = (double *)malloc(M*N*sizeof(double)); // double *L2 = (double *)malloc(M*N*sizeof(double)); double *work = (double *)malloc(N*sizeof(double)); LAPACKE_dgeqrf(LAPACK_COL_MAJOR, M, N, A1, M, work); Rnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, A1, M, work); Anorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, A2, LDA, work); Rnorm1 = PLASMA_dlange(PlasmaInfNorm, M, N, A1, LDA); Anorm1 = PLASMA_dlange(PlasmaInfNorm, M, N, A2, LDA); printf("|Rnorm-Rnorm1|: %e, |Anorm-Anorm1|: %e\n", fabs(Rnorm-Rnorm1), fabs(Anorm-Anorm1)); printf("============\n"); printf("Checking the QR Factorization \n"); printf("-- ||L'L-A||_oo/(||A||_oo.N.eps) = %e \n",fabs(Rnorm-Anorm)/(Anorm)); if ( isnan(fabs(Rnorm-Anorm)/(Anorm)) || (fabs(Rnorm-Anorm)/(Anorm) > 10.0) ){ printf("-- Factorization is suspicious ! \n"); info_factorization = 1; } else{ printf("-- Factorization is CORRECT ! \n"); info_factorization = 0; } // free(Residual); free(L1); free(L2); free(work); return info_factorization; }
main.c
/*************** 2D LBM-DEM Code **************************/ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <unistd.h> #include <time.h> #include <assert.h> #include "visit_writer.h" #ifdef _OPENMP #include <omp.h> #endif // Switch on or off FLUID #define _FLUIDE_ // Maximum number of soil grains #ifndef nbgrainsMax #define nbgrainsMax 40000 #endif // Dimension of the LBM Fluid domain #ifndef scale #define scale 1. #endif #ifndef lx <<<<<<< HEAD #define lx 19169 #endif #ifndef ly #define ly 1424 ======= #define lx 7826 #endif #ifndef ly #define ly 2325 >>>>>>> e10ce4e26b3e33ab0cb34449a8df7fe954929762 #endif #ifdef SINGLE_PRECISION typedef float real; #define FLOAT_FORMAT "%e" #else typedef double real; #define FLOAT_FORMAT "%le" #endif #define pi 3.14159265358979 #define rhoS 2650 // Density of solids #define rhoW 1000 // Density of water #define duration 1.5 // Duration of simulation //********************* Data LBM ************************ #define Q 9 int nbgrains; // width of LBM grid size, time step, lattice speed real dx, dtLB, c, c_squ; real _w[Q] = {4. / 9, 1. / 36, 1. / 9, 1. / 36, 1. / 9, 1. / 36, 1. / 9, 1. / 36, 1. 
/ 9}; real * restrict w = _w; real (* restrict f)[ly][Q]; // ************************************ // * * // * e2 e9 e8 * // * \ | / * // * \ | / * // * e3--- e1 ---e7 * // * / | \ * // * / | \ * // * e4 e5 e6 * // * * // ************************************ int ex[Q] = {0, -1, -1, -1, 0, 1, 1, 1, 0}; int ey[Q] = {0, 1, 0, -1, -1, -1, 0, 1, 1}; // average fluid density real rho_moy = 1000; // air density =1 or water =1000 or 999.7 at 20 real rho_outlet, q_outlet; // relaxation parameter real tau = 0.504; real s2 = 1.5, s3 = 1.4, s5 = 1.5, s7 = 1.5, s8 = 1.9841, s9 = 1.9841; // s8=1.6666667,s9=1.6666667; // obstacle array int (* restrict obst)[ly]; // obstacle activity array int (* restrict act)[ly]; real (* restrict delta)[ly][Q]; // LB diameter for the smallest disk (in nodes number) real rMin_LB = 10.; // Fluid kinematic viscosity real nu = 1e-6; // 15.5e-6 for air and 1e-6 for water at 293K or 20C real (* restrict press)[ly]; real reductionR = 0.85; // LBM reduced grain diameter //*********** Data DEM ******************** real G = 9.81; real angleG = 0.0; real xG, yG; real dt; //=5.e-8; real dt2; // Spring stiffness real km = 3e+6, kg = 1.6e+6; // 2e8 1.6e8 real kt = 1.0e+6, ktm = 2e+6; /// changed to higher for lesser /// interpenetration in sample generation 1.3e8 real nug = 6.4e+1; // 1.1e1 real num = 8.7e+1, numb = 8.7e+1; // 1.5e1 real nuf = 1.5e-1, nugt = 5e-1; // frictionless packing nugt real mu = .5317; real mug = 0.0; // Mu for assembling real mum = .466, mumb = .466; // 0.466 //0.53 0.51 0.43 real murf = 0.01; // 0.01 real r = 1e-3; // 5e-4;v real distVerlet = 5e-4; // changed to 1e-6 from 1e-3 for error long UpdateVerlet = 100.; real dtt = 0.; // Time after which wall to prepare sample is removed real iterDEM = 100.; // number of DEM iterations per LBM // Tracked stats // EPE-Effective potential energy // Total Wall Friction - WF, // SE- Strain Energy ESE & IFR- Total Internal Friction real xfront, height, energie_cin, energie_x, energie_y, 
energie_teta, energy_p, energy_EPE, zmean, SE, ESE, WF, IFR; real TSE = 0.0, TBW = 0.0, INCE = 0.0, TSLIP = 0.0, TRW = 0.0; // Total Body Work and Strain Energy TRW_ Total Rotational Work real pf = 0., pft = 0., pff = 0.; // previous force real ic = 0; // ******** Control parameters ************* // Number of DEM steps in LB int npDEM; real *rLB; <<<<<<< HEAD int stepView = 800; int stepPrint = 1600; int stepConsole = 800; int stepStrob = 5000; //visualisation steps int stepFilm = 10000; ======= int stepView = 400; int stepPrint = 800; int stepConsole = 400; int stepStrob = 4000; //visualisation steps int stepFilm = 8000; >>>>>>> e10ce4e26b3e33ab0cb34449a8df7fe954929762 FILE* s_stats; int nFile = 0; // Nth File saves LB int cptFlash; int *cumul; int * restrict neighbours; // NeighbourWall Bottom, Right, Left & Top int * restrict neighbourWallB; int * restrict neighbourWallR; int * restrict neighbourWallL; int * restrict neighbourWallT; int nNeighWallb, nNeighWallt, nNeighWallL, nNeighWallR; int start = 0; long nbsteps = 0; int vib = 0; real freq = 5; real amp = 4.e-4; real t = 0; // Luding Friction Model struct contact { int i, j; real nx, ny; // normal vector from j to i real fn, ft; // force component in local frame }; // struct contact (*c1)[nbgrainsMax]; // struct contact *c2; struct force { real f1, f2, f3; }; struct force * restrict fhf; real * restrict fhf1, * restrict fhf2, * restrict fhf3; struct grain { real x1, x2, x3; real v1, v2, v3; real a1, a2, a3; real r, m, mw, It; real p; // Pressure on grain real s; // shear real f1, f2; // force real ifm, fm; // friction mobility real fr, ifr; // frictional energy wall and Internal real M11, M12, M21, M22; // Moments M11, M12, M21, M22 real ice, slip, rw; // Inelastic Collisional Energy, slip, & Rotational work int z; // number of contacts int zz; // number of contacts sans the Walls }; struct grain * restrict g; // Wall static real Mby = 0.; static real Mgx = 0.; static real Mhy = 0.; static real Mdx = 0.; real 
ecart_ini = 1.0;  // initial spacing -- usage not visible here, TODO confirm

// ***************************************************************************
// *   utilities
// ***************************************************************************
real Maxt(real x, real y)
{
  // NOTE(review): despite the name this is NOT max(x, y): it returns 0 when
  // x < y and y otherwise. Verify the intended torque-capping logic at the
  // call sites before changing it.
  if (x < y)
    return 0.;
  else
    return y;
}
//---------------------------------------------------
// Smallest radius over all grains (assumes nbgrains >= 1).
real minimum_grain_radius(int nbgrains, struct grain g[nbgrains])
{
  real rMin = g[0].r;
  for (int i = 1; i <= nbgrains - 1; i++) {
    rMin = fmin(rMin, g[i].r);
  }
  return rMin;
}
//----------------------------------------------------
// Exchange the values pointed to by a and b.
void swap(real* a, real* b)
{
  real tmp;
  tmp = *a;
  *a = *b;
  *b = tmp;
}

// *******************************************************************
// *                  Output files                                   *
// *******************************************************************
// Write grain and fluid fields on the nx x ny lattice to five VisIt
// rectilinear-mesh files (one per variable), tagged with the global nFile
// counter. Cells covered by a grain (obst[x][y] in [0, nbgrains)) carry the
// grain's pressure/velocity/acceleration; all other cells carry the fluid
// pressure and momentum summed from the distribution f.
void write_vtk(int nx, int ny, real f[nx][ny][Q], int nbgrains,
               struct grain g[nbgrains])
{
  char filename[255];
  sprintf(filename, "lbm-dem_%.6i", nFile);
  char gpress[255];
  sprintf(gpress, "grain_pressure_%.6i", nFile);
  char gvel[255];
  sprintf(gvel, "grain_velocity_%.6i", nFile);
  char gacc[255];
  sprintf(gacc, "grain_acceleration_%.6i", nFile);
  char fpress[255];
  sprintf(fpress, "fluid_pressure_%.6i", nFile);
  char fvel[255];
  sprintf(fvel, "fluid_velocity_%.6i", nFile);
  int dims[] = {nx, ny, 1};
  float *xs = malloc(sizeof(float)*nx);
  float *ys = malloc(sizeof(float)*ny);
  float *zs = malloc(sizeof(float)*1);
  // Coordinates are normalised by nx in both directions
  float pasxyz = 1. / nx;
  for (int i = 0; i < nx; i++) xs[i] = i*pasxyz;
  for (int i = 0; i < ny; i++) ys[i] = i*pasxyz;
  *zs = 0;
  int nvars = 5;  // NOTE(review): unused; the per-file writes below pass 1
  int vardims[5][1] = {{1}, {3}, {3}, {1}, {3}};
  int centering[5][1] = {{1}, {1}, {1}, {1}, {1}};
  char* varnames[5][1] = {{"grain_pressure"}, {"grain_velocity"},
                          {"grain_acceleration"}, {"fluid_pressure"},
                          {"fluid_velocity"}};
  char* filenames[] = {gpress, gvel, gacc, fpress, fvel};
  // Row-major [y][x] layout as expected by visit_writer
  float (*grain_pressure )[nx] = malloc(sizeof(float)*nx*ny);
  float (*grain_velocity )[nx][3] = malloc(sizeof(float)*nx*ny*3);
  float (*grain_acceleration)[nx][3] = malloc(sizeof(float)*nx*ny*3);
  float (*fluid_pressure )[nx] = malloc(sizeof(float)*nx*ny);
  float (*fluid_velocity )[nx][3] = malloc(sizeof(float)*nx*ny*3);
  float* vars[5][1] = {{(float*)grain_pressure}, {(float*)grain_velocity},
                       {(float*)grain_acceleration}, {(float*)fluid_pressure},
                       {(float*)fluid_velocity}};
  for (int y = 0; y < ny; y++) {
    for (int x = 0; x < nx; x++) {
      int i = obst[x][y];
      if (i >= 0 && i < nbgrains) {
        // Lattice node covered by grain i: report grain state, zero fluid
        grain_pressure[y][x] = g[i].p;
        grain_velocity[y][x][0] = g[i].v1;
        grain_velocity[y][x][1] = g[i].v2;
        grain_velocity[y][x][2] = 0.0;
        grain_acceleration[y][x][0] = g[i].a1;
        grain_acceleration[y][x][1] = g[i].a2;
        grain_acceleration[y][x][2] = 0.0;
        fluid_pressure[y][x] = 0.0;
        fluid_velocity[y][x][0] = 0.0;
        fluid_velocity[y][x][1] = 0.0;
        fluid_velocity[y][x][2] = 0.0;
      } else {
        // Fluid (or wall) node: moments of the distribution function
        grain_pressure[y][x] = -1;
        grain_velocity[y][x][0] = 0.0;
        grain_velocity[y][x][1] = 0.0;
        grain_velocity[y][x][2] = 0.0;
        grain_acceleration[y][x][0] = 0.0;
        grain_acceleration[y][x][1] = 0.0;
        grain_acceleration[y][x][2] = 0.0;
        fluid_pressure[y][x] = 0.0;
        fluid_velocity[y][x][0] = 0.0;
        fluid_velocity[y][x][1] = 0.0;
        fluid_velocity[y][x][2] = 0.0;
        for (int j = 0; j < Q; ++j) {
          fluid_pressure[y][x] += f[x][y][j];
          fluid_velocity[y][x][0] += f[x][y][j] * ex[j];
          fluid_velocity[y][x][1] += f[x][y][j] * ey[j];
        }
        fluid_pressure[y][x] = (1. / 3.) * rho_moy * (fluid_pressure[y][x] - 1.);
      }
    }
  }
  // write_rectilinear_mesh(filename, 1, dims, xs, ys, zs, nvars, vardims, centering, varnames, vars);
  for (int i = 0; i < 5; ++i)
    write_rectilinear_mesh(filenames[i], 1, dims, xs, ys, zs, 1,
                           vardims[i], centering[i], varnames[i], vars[i]);
  free(xs);
  free(ys);
  free(zs);
  free(grain_pressure);
  free(grain_velocity);
  free(grain_acceleration);
  free(fluid_velocity);
  free(fluid_pressure);
}

// Dump per-grain state to DEM%06d.dat and append aggregate statistics
// (front position, energies, coordination numbers, ...) to stats.data.
// Also updates the global energy/front trackers as a side effect.
void write_DEM()
{
  int i;
  char filename[25];
  real N0, N1, N2, N3, N4, N5;  // Percentage of particles in contact
  real xgrainmax;
  FILE* outfile;  // Output file
  // sprintf(filename,"DEM_Grains%.6i.dat",nFile);
  sprintf(filename, "DEM%.6i.dat", nFile);
  outfile = fopen(filename, "w");
  xfront = g[0].x1 + g[0].r;
  height = g[0].x2 + g[0].r;
  energie_cin = 0.;
  energie_x = 0.;
  energie_y = 0.;
  energie_teta = 0.;
  energy_p = 0.;
  energy_EPE = 0.;
  SE = 0.;
  ESE = 0.;
  WF = 0.;
  IFR = 0.;
  INCE = 0.;
  TSLIP = 0.;
  TRW = 0.;
  zmean = 0;
  xgrainmax = g[0].x1;
  N0 = 0;
  N1 = 0;
  N2 = 0;
  N3 = 0;
  N4 = 0;
  N5 = 0;
  for (i = 0; i < nbgrains; i++) {
    // Histogram of contact counts (0..5 contacts)
    zmean += g[i].z;
    if (g[i].z == 0) N0 += 1;
    if (g[i].z == 1) N1 += 1;
    if (g[i].z == 2) N2 += 1;
    if (g[i].z == 3) N3 += 1;
    if (g[i].z == 4) N4 += 1;
    if (g[i].z == 5) N5 += 1;
    // Kinetic (translational and rotational) and potential energies
    energie_x += 0.5 * g[i].m * g[i].v1 * g[i].v1;
    energie_y += 0.5 * g[i].m * g[i].v2 * g[i].v2;
    energie_teta += 0.5 * g[i].It * g[i].v3 * g[i].v3;
    energy_p += g[i].m * G * g[i].x2;
    /* if (nbsteps*dt>=dtt) */
    SE += 0.5 * (((g[i].p * g[i].p) / kg) + ((g[i].s * g[i].s) / kt));
    WF += g[i].fr;
    g[i].ifr = fabs(((g[i].m * G + g[i].f2) * (dt * g[i].v2 + dt2 * g[i].a2 / 2.)) +
                    (g[i].f1 * (dt * g[i].v1 + dt2 * g[i].a1 / 2.)));
    // g[i].ifr=(g[i].f2*(dt*g[i].v2))+(g[i].f1*(dt*g[i].v1));
    IFR += g[i].ifr;
    TSLIP += g[i].slip;
    TRW += g[i].rw;
    INCE += g[i].ice;
    TBW += g[i].ifr;
    ESE = 0.5 * (((g[i].p * g[i].p) / kg) + ((g[i].s * g[i].s) / kt));
    TSE += ESE;
    // if(g[i].x2>(-1.632*g[i].x1+0.0408)){energy_EPE+=g[i].m*G*g[i].x2;}
    if (g[i].x1 + g[i].r > xgrainmax) {
      xgrainmax = g[i].x1 +
g[i].r; } if (g[i].x2 + g[i].r > height) { height = g[i].x2 + g[i].r; } if (g[i].zz > 0 && g[i].x1 + g[i].r >= xfront) { xfront = g[i].x1 + g[i].r; } if (g[i].z == 0) { g[i].fm = 0; } else g[i].fm = g[i].ifm / g[i].z; fprintf(outfile, "%i\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%" "le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%" "le\t%i\n", i, g[i].r, g[i].x1, g[i].x2, g[i].x3, g[i].v1, g[i].v2, g[i].v3, g[i].a1, g[i].a2, g[i].a3, fhf1[i], fhf2[i], fhf3[i], g[i].p, g[i].s, ESE, g[i].fr, g[i].ifr, g[i].ice, g[i].slip, g[i].rw, g[i].fm, g[i].M11, g[i].M12, g[i].M21, g[i].M22, g[i].z); } energie_cin = energie_x + energie_y + energie_teta; zmean = zmean / nbgrains; s_stats = fopen("stats.data", "a"); fprintf(s_stats, "%le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le " "%le %le %le %le %le\n", nbsteps * dt - dtt, xfront, xgrainmax, height, zmean, energie_x, energie_y, energie_teta, energie_cin, N0 / nbgrains, N1 / nbgrains, N2 / nbgrains, N3 / nbgrains, N4 / nbgrains, N5 / nbgrains, energy_p, SE, WF, IFR, INCE, TSLIP, TRW); fclose(s_stats); fclose(outfile); } void write_forces() { int i, j; real dn; char nomfile[25]; FILE* outfile1; // Ouverture du fichier // sprintf(filename,"DEM_Grains%.6i.dat",nFile); sprintf(nomfile, "DEM%.6i.ps", nFile); outfile1 = fopen(nomfile, "w"); real margin = 10 * g[0].r, hrx1 = lx, hry2 = ly; fprintf(outfile1, "%%!PS-Adobe-3.0 EPSF-3.0 \n"); fprintf(outfile1, "%%%BoundingBox: %f %f %f %f \n", -margin, -margin, hrx1 + margin, hry2 + margin); fprintf(outfile1, "%%%Creator: Krishna Kumar \n"); fprintf(outfile1, "%%%Title: DEM Grains & Forces \n"); fprintf(outfile1, "0.1 setlinewidth 0.0 setgray \n"); for (i = 0; i <= nbgrains; i++) fprintf(outfile1, "newpath %le %le %le 0.0 setlinewidth %.2f setgray 0 360 arc gsave " "fill grestore\n", g[i].x1 * 10000, g[i].x2 * 10000, g[i].r * 10000, (0.8 - g[i].fm / 2)); for (i = 0; i <= nbgrains; i++) { for (j = 0; j <= nbgrains; j++) { dn = 
(sqrt((g[i].x1 - g[j].x1) * (g[i].x1 - g[j].x1) + (g[i].x2 - g[j].x2) * (g[i].x2 - g[j].x2))) - g[i].r - g[j].r; if (dn < -1e-10 && i != j) { // printf("dn for i %i and j %i are: %le \n",i,j,dn); fprintf(outfile1, "%le setlinewidth \n 0.0 setgray \n", 1.); // c1[i][j].fn); fprintf(outfile1, "1 setlinecap \n newpath \n"); fprintf(outfile1, "%le %le moveto \n %le %le lineto\n", g[i].x1 * 10000, g[i].x2 * 10000, g[j].x1 * 10000, g[j].x2 * 10000); fprintf(outfile1, "stroke \n"); } } } fclose(outfile1); } // -------------------------- void write_densities() { int x, y, i; real pasxyz; real P, u_x, u_y; char filename[25]; char filename_press[25]; FILE* outfile; FILE* s_press; sprintf(filename, "densities%.6i.vtk", nFile); sprintf(filename_press, "pressure_base%.6i.dat", nFile); pasxyz = 1. / lx; outfile = fopen(filename, "w"); s_press = fopen(filename_press, "w"); fprintf(outfile, "# vtk DataFile Version 2.0\n"); fprintf(outfile, "Outfile domain LB t: %e\n", t); fprintf(outfile, "ASCII\n"); fprintf(outfile, "DATASET RECTILINEAR_GRID\n"); fprintf(outfile, "DIMENSIONS %d %d 1\n", lx, ly); fprintf(outfile, "X_COORDINATES %d float\n", lx); for (i = 0; i <= lx - 1; i++) { fprintf(outfile, "%e ", (float)i * pasxyz); } fprintf(outfile, "\n"); fprintf(outfile, "Y_COORDINATES %d float\n", ly); for (i = 0; i <= ly - 1; i++) { fprintf(outfile, "%e ", (float)i * pasxyz); } fprintf(outfile, "\n"); fprintf(outfile, "Z_COORDINATES 1 float\n"); fprintf(outfile, "0\n"); // Pour LB fprintf(outfile, "POINT_DATA %d\n", lx * ly); fprintf(outfile, "SCALARS Pressure float 1\n"); fprintf(outfile, "LOOKUP_TABLE default\n"); for (y = 0; y < ly; y++) { for (x = 0; x < lx; x++) { P = 0.; for (i = 0; i < Q; i++) { P += f[x][y][i]; } P = (1. / 3.) 
          * rho_moy * (P - 1.);
      if (obst[x][y] < 0) {
        // Fluid node: write the pressure; row y==2 also goes to s_press
        fprintf(outfile, "%.4lf\n", P);
        if (y == 2) {
          fprintf(s_press, "%le %le\n", x * pasxyz, P);
        }
      } else {
        // Obstacle node: report zero pressure
        fprintf(outfile, "%.4lf\n", 0.);
        if (y == 2) {
          fprintf(s_press, "%le %le\n", x * pasxyz, 0.0);
        }
      }
    }
  }
  fprintf(outfile, "VECTORS VecVelocity float\n");
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      // P=rho_moy;
      // First moment of f -> momentum components
      u_x = 0.;
      u_y = 0.;
      for (i = 0; i < Q; i++) {
        u_x += f[x][y][i] * ex[i];
        u_y += f[x][y][i] * ey[i];
      }
      // P = (P-rho_moy)*1./3.;
      // P = (1./3.)*rho_moy*(P-1.);
      if (obst[x][y] < 0) {
        fprintf(outfile, "%.4lf %.4lf 0.\n", u_x, u_y);
      } else {
        fprintf(outfile, "%.4lf %.4lf 0.\n", 0., 0.);
      }
    }
  }
  fclose(s_press);
  fclose(outfile);
}

// *******************************************************************
// *            sample initial                                       *
// *******************************************************************
// Build a synthetic sample: identical grains of radius r laid out on a
// rectangular lattice (rows of 14), all at rest.
void temp_sample()
{
  long i, j, k;
  j = 0;
  k = 0;
  for (i = 0; i < nbgrains; ++i) {
    g[i].r = r;  //*(real)(i+1)/nbgrains;
    g[i].m = rhoS * pi * g[i].r * g[i].r;  // 2D disc mass
#ifdef _FLUIDE_
    g[i].mw = rhoW * pi * g[i].r * g[i].r;  // displaced water mass
#else
    g[i].mw = 0;
#endif
    // g[i].m=(4./3.)*rhoS*pi*g[i].r*g[i].r*g[i].r;
    g[i].It = g[i].m * g[i].r * g[i].r / 2;  // disc moment of inertia
    // g[i].It=(2./5.)*g[i].m*g[i].r*g[i].r;
    g[i].x1 = r * 1.5 + 2. * r * j;  // Pente r*(1.5+(k/10.))
    g[i].x2 = r + 2. * r * k;
    g[i].x3 = 0.;
    g[i].v1 = 0.;
    g[i].v2 = 0.;
    g[i].v3 = 0.;
    g[i].a1 = 0.;
    g[i].a2 = 0.;
    g[i].a3 = 0.;
    // if(j<=4) {j++;} else {j=0;k++;};
    // Advance the (column j, row k) lattice cursor; wraps after column 13.
    // NOTE(review): the first branch (j < 0 && k == 0) can never be taken
    // since j starts at 0 and only increments -- likely leftover logic.
    if (j < 0 && k == 0) {
      j++;
    } else {
      if (j <= 13) {
        j++;
      } else {
        j = 0;
        k++;
      };
    };
  }
}

// Read a sample file: one comment line, the grain count, then one
// "r x1 x2;" line per grain (all scaled by the global radius r).
// Sets the global nbgrains and returns a newly malloc'ed grain array.
struct grain* read_sample(char * filename_sample)
{
  FILE *sample_file = fopen(filename_sample, "r");
  char com[256];
  fgets(com, 256, sample_file);
  printf("%s\n", com);
  fscanf(sample_file, "%d\n", &nbgrains);
  struct grain *g = malloc(sizeof(struct grain)*nbgrains);
  printf("Nb grains %d\n", nbgrains);
  for (int i = 0; i < nbgrains; ++i) {
    fscanf(sample_file, FLOAT_FORMAT" "FLOAT_FORMAT" "FLOAT_FORMAT";\n",
           &g[i].r, &g[i].x1, &g[i].x2);
    // printf("%le %le %le\n",g[i].r,g[i].x1,g[i].x2);
    g[i].r = g[i].r * r;
    g[i].m = rhoS * pi * g[i].r * g[i].r;
    g[i].It = g[i].m * g[i].r * g[i].r / 2;
    g[i].x1 = g[i].x1 * r;
    g[i].x2 = g[i].x2 * r;
    g[i].x3 = 0.;
    g[i].v1 = 0.;
    g[i].v2 = 0.;
    g[i].v3 = 0.;
    g[i].a1 = 0.;
    g[i].a2 = 0.;
    g[i].a3 = 0.;
  }
  fclose(sample_file);
  return g;
}

// Print the bounding box, total mass and packing fraction of the sample.
void check_sample(int nbgrains, struct grain g[nbgrains])
{
  real xMax = g[0].x1;
  real xMin = g[0].x1;
  real yMax = g[0].x2;
  real yMin = g[0].x2;
  real MassGrain = 0.;
  for (int i = 0; i < nbgrains; ++i) {
    MassGrain += g[i].m;
    xMax = fmax(xMax, g[i].x1 + g[i].r);
    xMin = fmin(xMin, g[i].x1 - g[i].r);
    yMax = fmax(yMax, g[i].x2 + g[i].r);
    yMin = fmin(yMin, g[i].x2 - g[i].r);
  }
  real L0 = xMax - xMin;
  real H0 = yMax - yMin;
  printf("L0=%le H0=%le Mass of Grains=%le Phi=%le\n", L0, H0, MassGrain,
         MassGrain / (rhoS * (L0 * H0)));
}

// *******************************************************************************************
// *            Initialise obstacle array                                                    *
// *******************************************************************************************
// Mark each lattice node with the index of the grain covering it (reduced
// radius rLB), the sentinel nbgrains on the domain walls, and -1 for fluid.
void init_obst()
{
  int x, y, i, xi, yi, xf, yf;
  // c.d.g. sphere (grain centre of gravity)
  // real xc,yc;
  real dist2, r2, R2, xc, yc, rbl0;
  // Interior starts as fluid
  for (x = 1; x < lx - 1; x++) {
    for (y = 1; y < ly - 1; y++) {
      obst[x][y] = -1;
    }
  }
  // Domain walls carry the sentinel index nbgrains and are inactive
  for (x = 0; x < lx; x++) {
    obst[x][0] = obst[x][ly - 1] = nbgrains;
    act[x][0] = act[x][ly - 1] = 0;
  }
  for (y = 1; y < ly - 1; y++) {
    obst[0][y] = obst[lx - 1][y] = nbgrains;
    act[0][y] = act[lx - 1][y] = 0;
  }
  for (i = 0; i < nbgrains; i++) {
    // Grain centre in lattice coordinates
    xc = (g[i].x1 - Mgx) / dx;
    yc = (g[i].x2 - Mby) / dx;
    r2 = rLB[i] * rLB[i];
    // Unreduced grain radius
    rbl0 = g[i].r / dx;
    R2 = rbl0 * rbl0;
    // Clamp the scan window to the interior of the domain
    xi = (int)(xc - rbl0);
    xf = (int)(xc + rbl0);
    if (xi < 1) xi = 1;
    if (xf >= lx - 1) xf = lx - 2;
    yi = (int)(yc - rbl0);
    yf = (int)(yc + rbl0);
    if (yi < 1) yi = 1;
    if (yf >= ly - 1) yf = ly - 2;
    for (x = xi; x <= xf; x++) {
      for (y = yi; y <= yf; y++) {
        dist2 = (x - xc) * (x - xc) + (y - yc) * (y - yc);
        if (dist2 <= R2) {
          // Only nodes within the reduced radius are tagged as the grain
          if (dist2 <= r2) obst[x][y] = i;
        }
      }
    }
  }
}

// *******************************************************************************************
// *  Initialise density distribution function with equilibrium to zero density              *
// *******************************************************************************************
// Set every node's distribution to the lattice weights (equilibrium at rest).
void init_density(int nx, int ny, real f[nx][ny][Q])
{
  for (int x = 0; x < nx; x++) {
    for (int y = 0; y < ny; y++) {
      for (int iLB = 0; iLB < Q; iLB++) {
        f[x][y][iLB] = w[iLB];
      }
    }
  }
}

// *******************************************************************
// *       Calculate the forces on grains                            *
// *******************************************************************
// Linear spring-dashpot contact force between grains i and j, with Coulomb
// friction cap. Returns the force on grain i and accumulates pressure,
// shear, energy and stress trackers on both grains as a side effect.
struct force force_grains(long i, long j)
{
  // distance normale (normal gap)
  real dn, xOiOj, yOiOj, OiOj;
  real xn, yn;
  real vn, vxOiOj, vyOiOj, vt;
  real ftest;
  struct force f;
  double fn, ft;
  // distance relative (centre-to-centre vector and gap)
  xOiOj = g[i].x1 - g[j].x1;
  yOiOj = g[i].x2 - g[j].x2;
  OiOj = sqrt(xOiOj * xOiOj + yOiOj * yOiOj);
  dn = OiOj - g[i].r - g[j].r;
  // calculate the forces
  if (dn >= 0) {
    // No overlap: no contact force
    f.f1 = 0;
    f.f2 = 0;
    f.f3 = 0;
  } else {
    // relative normal velocity
    vxOiOj = g[i].v1 - g[j].v1;
    vyOiOj = g[i].v2
- g[j].v2; xn = xOiOj / OiOj; yn = yOiOj / OiOj; // Compute velocities at contact vn = vxOiOj * xn + vyOiOj * yn; // Tangential Velocity vt = -vxOiOj * yn + vyOiOj * xn - g[i].v3 * g[i].r - g[j].v3 * g[j].r; // calculate normal force fn = -kg * dn - nug * vn; if (fn < 0) fn = 0.0; ft = - kt * vt * dt; ftest = mu * fn; if (fabs(ft) > ftest) { if (ft < 0.0) ft = ftest; else ft = -ftest; } f.f1 = fn * xn - ft * yn; f.f2 = fn * yn + ft * xn; f.f3 = -Maxt(ft * g[i].r, fn * murf * g[i].r * g[j].r); g[i].p += fn; g[j].p += fn; g[i].f1 += f.f1; g[i].f2 += f.f2; g[i].s += ft; g[j].s += ft; g[i].slip += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt); pft = ft; g[i].rw += fabs(f.f3) * (fabs(g[i].v3 * dt) + (fabs(f.f3 - pff)) / kt); pff = f.f3; g[i].z += 1; g[i].zz += 1; g[i].ice += ic; if (fn == 0) g[i].ifm = 0; else g[i].ifm += fabs(ft / (mu * fn)); // Stress computations g[i].M11 += f.f1 * xOiOj; g[i].M12 += f.f1 * yOiOj; g[i].M21 += f.f2 * xOiOj; g[i].M22 += f.f2 * yOiOj; } return f; } // ******************************************************************* // * Calculation of forces between the grains and Walls * // ******************************************************************* struct force force_WallB(long i, real dn) { real vn, vt, ftest; struct force f; real fn, ft; vn = g[i].v2; vt = g[i].v1; fn = -km * dn - num * vn; if (fn < 0) fn = 0.; ft = ktm * vt; //*dt; //Krishna ftest = mumb * fn; if (fabs(ft) > ftest) { if (ft < 0.0) ft = ftest; else ft = -ftest; } f.f1 = ft; f.f2 = fn; f.f3 = -(ft * g[i].r * murf); g[i].p += fn; g[i].s += ft; g[i].f1 += f.f1; g[i].z += 1; // Stress computations g[i].M11 += 0; g[i].M12 += f.f1 * dt; g[i].M21 += 0; g[i].M22 += f.f2 * dt; g[i].rw += fabs(f.f3) * (fabs(g[i].v3 * dt) + (fabs(f.f3 - pff)) / kt); g[i].fr += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt); pff = f.f3; pft = ft; return f; } struct force force_WallT(long i, real dn) { real vn, vt, fn, ft, ftmax; struct force f; vn = g[i].v2; fn = km * dn - num * vn; ic += 
num * vn * vn * dt; if (fn > 0.) fn = 0.; // relative tangential velocity vt = g[i].v1 + g[i].v3 * g[i].r - amp * freq * cos(freq * t); ft = fabs(ktm * vt); // if (nbsteps*dt<dtt){mumb=mug;nugt=0;} if (vt >= 0) { ftmax = mumb * fn - nugt * vt; } // ic+=nugt*vt*vt*dt;} else { ftmax = mumb * fn + nugt * vt; } // ic+=nug*vt*vt*dt;} // ftmax=mum*fn-num*vt; if (ft > ftmax) ft = ftmax; if (vt > 0) ft = -ft; f.f1 = ft; f.f2 = fn; // f.f3=ft*g[i].r-fabs(murf*g[i].r*g[i].v3*fn); f.f3 = ft * g[i].r * murf; // f.f3=(ft-fabs(vt*nuf))*g[i].r; // Stress computations g[i].M11 += 0; g[i].M12 += f.f1 * fabs(dt); g[i].M21 += 0; g[i].M22 += f.f2 * fabs(dt); g[i].p += fn; g[i].s += ft; g[i].z += 1; // g[i].rw+=fabs(f.f3)*(fabs(g[i].v3*dt)+(fabs(f.f3-pff))/kt); // g[i].fr+=fabs(ft)*(fabs(vt*dt)+(fabs(ft-pft))/kt); // pff=f.f3;pft=ft; return f; } struct force force_WallL(long i, real dn) { real vn, fn, vt, ft; struct force f; vn = g[i].v1; fn = -km * dn + num * vn; ic += num * vn * vn * dt; if (fn < 0.) fn = 0.; vt = g[i].v2; if (vt > 0) ft = mum * fn; else ft = mum * fn; if (vt > 0) ft = -ft; f.f1 = fn; f.f2 = ft; // f.f2=ft*g[i].r-fabs(murf*g[i].r*g[i].v3*fn); f.f3 = ft * g[i].r * murf; // f.f3=(ft-fabs(vt*nuf))*g[i].r; // Stress computations g[i].M11 += f.f1 * fabs(dt); g[i].M12 += 0; g[i].M21 += f.f2 * fabs(dt); g[i].M22 += 0; g[i].p += fn; g[i].s += ft; g[i].f1 += f.f1; g[i].z += 1; g[i].ice += ic; g[i].rw += fabs(f.f3) * fabs(g[i].v3 * dt); g[i].fr += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt); pft = ft; return f; } struct force force_WallR(long i, real dn) { real vn, fn, vt, ft; struct force f; vn = g[i].v1; fn = km * dn - num * vn; // ic+=num*vn*vn*dt; vt = g[i].v2; // tangential velcoty ft = mum * fn; // ic+=nugt*vt*vt*dt; // ftmax=mum*fn-num*vt; if (vt > 0) ft = -ft; if (fn > 0.) 
fn = 0.; f.f1 = fn; f.f2 = -ft; f.f3 = ft * g[i].r * murf; g[i].p += fn; g[i].f1 += f.f1; // g[i].ice +=ic; // g[i].fr+=fabs(ft)*(fabs(vt*dt)+(fabs(f.f3-pft))/kt); pft = ft; // Stress computations g[i].M11 += f.f1 * fabs(dt); g[i].M12 += 0; g[i].M21 += f.f2 * fabs(dt); g[i].M22 += 0; // g[i].s += ft; g[i].z += 1; return f; } // ******************************************************************* // * * // * * // * * // * Calculate the hydrodynamic forces * // * * // * * // * * // * * // ******************************************************************* // ******************************************************************************************* // * Reinitialise density distributions for nodes that change state from solid // to fluid * // ******************************************************************************************* void reinit_obst_density() { #pragma omp parallel for for (int x = 1; x < lx - 1; x++) { for (int y = 1; y < ly - 1; y++) { int i = obst[x][y]; if (i != -1) { // Usqu: the standard (^2) the speed of the node from the portion // solid to the fluid portion real u_squ = ((g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +(g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3) * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) / (c * c); for (int iLB = 0; iLB < Q; iLB++) { // eu : e.u in formula feq real eu = (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) + ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) / c; f[x][y][iLB] = w[iLB] * (1. + 3 * eu + 4.5 * eu * eu - 1.5 * u_squ); //*rho_moy; } } } } } // ******************************************************************************************* // * Obstacle array construction && nodes activity * // ******************************************************************************************* void obst_construction() { int x, y, xp, next_x, next_y, i, iLB, xi, yi, xf, yf; // c.d.g. 
sphere // real xc,yc; real dist2, aa, bb, cc, r2, xc, yc, R2, rbl0; #pragma omp parallel for for (x = 1; x < lx - 1; x++) { for (y = 1; y < ly - 1; y++) { obst[x][y] = -1; act[x][y] = 1; for (iLB = 1; iLB < Q; iLB++) { delta[x][y][iLB] = 0; } } } #pragma omp parallel for for (i = 0; i < nbgrains; i++) { xc = (g[i].x1 - Mgx) / dx; yc = (g[i].x2 - Mby) / dx; r2 = rLB[i] * rLB[i]; rbl0 = g[i].r / dx; // JYD2 R2 = rbl0 * rbl0; // xi=xc-rLB[i]; xf=xc+rLB[i]; if(xi<1) xi=1; if(xf>=lx-1) xf=lx-2; // yi=yc-rLB[i]; yf=yc+rLB[i]; if(yi<1) yi=1; if(yf>=ly-1) yf=ly-2; xi = (int)(xc - rbl0); xf = (int)(xc + rbl0); if (xi < 1) xi = 1; if (xf >= lx - 1) xf = lx - 2; yi = (int)(yc - rbl0); yf = (int)(yc + rbl0); if (yi < 1) yi = 1; if (yf >= ly - 1) yf = ly - 2; for (y = yi; y <= yf; y++) { for (x = xi; x <= xf; x++) { dist2 = (x - xc) * (x - xc) + (y - yc) * (y - yc); if (dist2 <= R2) { if (dist2 <= r2) obst[x][y] = i; } // if(dist2<=r2) obst[x][y]=i; } } // * Obstacle in inteaction with fluid (active obstacles) for (y = yi; y <= yf; y++) { for (x = xi; x <= xf; x++) { if (obst[x][y] == i) { act[x][y] = 0; // Search fluid node neighbourss for (iLB = 1; iLB < Q; iLB++) { next_x = x + ex[iLB]; // if (next_x<0) next_x=0; if (next_x>=lx) // next_x=lx-1; next_y = y + ey[iLB]; // if (next_y<0) next_y=0; if (next_y>=ly) // next_y=ly-1; if (obst[next_x][next_y] == -1) { // Calculating the distance between the node fluid and the wall of the particle // (Klaus-Nils-Ulrich) act[x][y] = 1; xp = x; aa = fabs(ex[iLB]) + fabs(ey[iLB]); bb = (xp + ex[iLB] - xc) * ex[iLB] + (y + ey[iLB] - yc) * ey[iLB]; cc = (xp + ex[iLB] - xc) * (xp + ex[iLB] - xc) + (y + ey[iLB] - yc) * (y + ey[iLB] - yc) - r2; delta[x][y][iLB] = (bb - sqrt(fabs(bb * bb - aa * cc))) / aa; } } } } } } } // ************************************************************************************************ // * Principal LB: Collision - (Streaming + Boundary Conditions) * // 
************************************************************************************************ void collision_streaming() { const int half = (Q - 1) / 2; const real a = 1. / 36; // Post-collision part computation // (Yu-Mei-Luo-Shyy) #pragma omp parallel for for (int x = 1; x < lx - 1; x++) { for (int y = 1; y < ly - 1; y++) { if (obst[x][y] == -1) { real rho = f[x][y][0] + f[x][y][1] + f[x][y][2] + f[x][y][3] + f[x][y][4] + f[x][y][5] + f[x][y][6] + f[x][y][7] + f[x][y][8]; real e = -4 * f[x][y][0] + 2 * f[x][y][1] - f[x][y][2] + 2 * f[x][y][3] - f[x][y][4] + 2 * f[x][y][5] - f[x][y][6] + 2 * f[x][y][7] - f[x][y][8]; real eps = 4 * f[x][y][0] + f[x][y][1] - 2 * f[x][y][2] + f[x][y][3] - 2 * f[x][y][4] + f[x][y][5] - 2 * f[x][y][6] + f[x][y][7] - 2 * f[x][y][8]; real j_x = f[x][y][5] + f[x][y][6] + f[x][y][7] - f[x][y][1] - f[x][y][2] - f[x][y][3]; real q_x = -f[x][y][1] + 2 * f[x][y][2] - f[x][y][3] + f[x][y][5] - 2 * f[x][y][6] + f[x][y][7]; real j_y = f[x][y][1] + f[x][y][8] + f[x][y][7] - f[x][y][3] - f[x][y][4] - f[x][y][5]; real q_y = f[x][y][1] - f[x][y][3] + 2 * f[x][y][4] - f[x][y][5] + f[x][y][7] - 2 * f[x][y][8]; real p_xx = f[x][y][2] - f[x][y][4] + f[x][y][6] - f[x][y][8]; real p_xy = -f[x][y][1] + f[x][y][3] - f[x][y][5] + f[x][y][7]; real j_x2 = j_x * j_x; real j_y2 = j_y * j_y; real eO = e - s2 * (e + 2 * rho - 3 * (j_x2 + j_y2) / rho); real epsO = eps - s3 * (eps - rho + 3 * (j_x2 + j_y2) / rho); real q_xO = q_x - s5 * (q_x + j_x); real q_yO = q_y - s7 * (q_y + j_y); real p_xxO = p_xx - s8 * (p_xx - (j_x2 - j_y2) / rho); real p_xyO = p_xy - s9 * (p_xy - j_x * j_y / rho); f[x][y][0] = a * (4*rho - 4 * eO + 4 * epsO); f[x][y][2] = a * (4*rho - eO - 2*epsO - 6*j_x + 6*q_xO + 9*p_xxO); f[x][y][4] = a * (4*rho - eO - 2*epsO - 6*j_y + 6*q_yO - 9*p_xxO); f[x][y][6] = a * (4*rho - eO - 2*epsO + 6*j_x - 6*q_xO + 9*p_xxO); f[x][y][8] = a * (4*rho - eO - 2*epsO + 6*j_y - 6*q_yO - 9*p_xxO); f[x][y][1] = a * (4*rho + 2*eO + epsO - 6*j_x - 3*q_xO + 6*j_y + 
// (cont.) collision_streaming: D2Q9 multiple-relaxation-time (MRT) collision. The lines
// above project f onto the moment basis (rho, energy e, energy-square eps, momentum j,
// heat flux q, stress p_xx/p_xy), relax each moment with its own rate s2..s9, and the
// lines below map the post-collision moments back to distributions (prefactor a = 1/36).
3*q_yO - 9*p_xyO); f[x][y][3] = a * (4*rho + 2*eO + epsO - 6*j_x - 3*q_xO - 6*j_y - 3*q_yO + 9*p_xyO); f[x][y][5] = a * (4*rho + 2*eO + epsO + 6*j_x + 3*q_xO - 6*j_y - 3*q_yO - 9*p_xyO); f[x][y][7] = a * (4*rho + 2*eO + epsO + 6*j_x + 3*q_xO + 6*j_y + 3*q_yO + 9*p_xyO); } } } // To calculate the edges; see the book Lattice Boltzmann Modeling // Bounce back for y=0 & y=ly-1 for (int x = 1; x < lx - 1; x++) { f[x][0][8] = f[x][1][4]; f[x][0][7] = f[x + 1][1][3]; //;+uw_b/6; f[x][0][1] = f[x - 1][1][5]; //-uw_b/6; // Top plate f[x][ly - 1][4] = f[x][ly - 2][8]; f[x][ly - 1][3] = f[x - 1][ly - 2][7]; //-uw_h/6; f[x][ly - 1][5] = f[x + 1][ly - 2][1]; //+uw_h/6; } for (int y = 1; y < ly - 1; y++) { f[0][y][6] = f[1][y][2]; f[0][y][7] = f[1][y + 1][3]; //;+uw_b/6; f[0][y][5] = f[1][y - 1][1]; //-uw_b/6; f[lx - 1][y][2] = f[lx - 2][y][6]; f[lx - 1][y][3] = f[lx - 2][y - 1][7]; //-uw_h/6; f[lx - 1][y][1] = f[lx - 2][y + 1][5]; //+uw_h/6; } // corner nodes f[0][0][7] = f[1][1][3]; f[lx - 1][0][1] = f[lx - 2][1][5]; //+uw_b/6 f[0][ly - 1][5] = f[1][ly - 2][1]; //-uw_b/6 f[lx - 1][ly - 1][3] = f[lx - 2][ly - 2][7]; // bounce back in obstacles ///////////////////////////////////////////////////////// // To calculate force f[][][]) // // 1: articlel of JYD-Mouloud // // 2: article of Klaus-Nils-Ulrich // // 3: article of Yu-Mei-Luo-Shyy // ///////////////////////////////////////////////////////// for (int x = 1; x < lx - 1; x++) { for (int y = 1; y < ly - 1; y++) { int i = obst[x][y]; if (i != -1 && act[x][y] == 1) { for (int iLB = 1; iLB <= half; iLB++) { int next_x = x + ex[iLB]; int next_y = y + ey[iLB]; if (obst[next_x][next_y] != -1) f[x][y][iLB] = w[iLB]; else //(obst[next_x][next_y]==-1) { // Calculation is based on JYD-Mouloud (2.3.3) if (delta[x][y][iLB] >= 0.5) { f[x][y][iLB] = f[next_x][next_y][iLB + half] / (2 * delta[x][y][iLB]) + (2 * delta[x][y][iLB] - 1) * f[next_x][next_y][iLB] / (2 * delta[x][y][iLB]) + 3 * (w[iLB] / c) * (ex[iLB] * (g[i].v1 - (y * dx + Mby - 
// (cont.) interpolated (curved-boundary) bounce-back on active solid nodes: delta is the
// sub-grid wall distance from obst_construction; the moving-wall term adds the grain's
// local velocity (translation + rotation about its centre). First loop handles
// directions 1..half, the second their opposites (iLB-half is the reverse direction).
g[i].x2) * g[i].v3) + ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) / delta[x][y][iLB]; } if (delta[x][y][iLB] > 0. && delta[x][y][iLB] < 0.5) { int next_xx = next_x + ex[iLB]; int next_yy = next_y + ey[iLB]; f[x][y][iLB] = 2 * delta[x][y][iLB] * f[next_x][next_y][iLB + half] + (1 - 2 * delta[x][y][iLB]) * f[next_xx][next_yy][iLB + half] + 6 * (w[iLB] / c) * (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) + ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)); } } } for (int iLB = 1 + half; iLB < Q; iLB++) { int next_x = x + ex[iLB]; int next_y = y + ey[iLB]; if (obst[next_x][next_y] != -1) f[x][y][iLB] = w[iLB]; else //(obst[next_x][next_y]==-1) { // Calculation is based on JYD-Mouloud (2.3.3) if (delta[x][y][iLB] >= 0.5) { f[x][y][iLB] = f[next_x][next_y][iLB - half] / (2 * delta[x][y][iLB]) + (2 * delta[x][y][iLB] - 1) * f[next_x][next_y][iLB] / (2 * delta[x][y][iLB]) + 3 * (w[iLB] / c) * (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) + ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) / delta[x][y][iLB]; } if (delta[x][y][iLB] > 0. 
// (cont.) below: the shallow-wall case (0 < delta < 0.5) interpolates one node further
// along the link; then the two swap loops implement in-place streaming (Latt's swap
// algorithm): first swap each direction with its opposite locally, then swap with the
// downstream neighbour. check_density/final_density report the total mass for sanity.
&& delta[x][y][iLB] < 0.5) { int next_xx = next_x + ex[iLB]; int next_yy = next_y + ey[iLB]; f[x][y][iLB] = 2 * delta[x][y][iLB] * f[next_x][next_y][iLB - half] + (1 - 2 * delta[x][y][iLB]) * f[next_xx][next_yy][iLB - half] + 6 * (w[iLB] / c) * (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) + ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)); } } } } } } for (int x = 0; x < lx; x++) { for (int y = 0; y < ly; y++) { for (int iLB = 1; iLB <= half; iLB++) { swap(&f[x][y][iLB], &f[x][y][iLB + half]); } } } for (int x = 0; x < lx; x++) { for (int y = 0; y < ly; y++) { for (int iLB = 1; iLB <= half; iLB++) { int next_x = x + ex[iLB]; // if(next_x<0) next_x=lx-1; int next_y = y + ey[iLB]; if (next_x >= 0 && next_y >= 0 && next_x < lx && next_y < ly) { swap(&f[x][y][iLB + half], &f[next_x][next_y][iLB]); } } } } } // ************************************************************************************************ // * Compute total density to verify of the system that no divergence occurs // when iterating * // ************************************************************************************************ void check_density() { int x, y, iLB; real sum = 0; for (x = 0; x < lx; x++) { for (y = 0; y < ly; y++) { for (iLB = 0; iLB < Q; iLB++) { sum = sum + f[x][y][iLB]; } } } printf("Iteration Number %ld, Total density in the system %f\n", nbsteps, sum); } void final_density() { int x, y, iLB; real sum = 0; for (x = 0; x < lx; x++) { for (y = 0; y < ly; y++) { for (iLB = 0; iLB < Q; iLB++) { sum = sum + f[x][y][iLB]; } } } fprintf(stderr, "final_density: %f\n", sum); } int min(int x, int y) { return (x < y) ? x : y; } int max(int x, int y) { return (x > y) ? 
x : y; } // **************************************************************************** // * Compute hydrodynamic forces * // **************************************************************************** void forces_fluid(int nx, int ny, real f[nx][ny][Q], int nbgrains, struct grain g[nbgrains]) { const int half = (Q - 1) / 2; for (int i = 0; i < nbgrains; ++i) { fhf1[i] = 0; fhf2[i] = 0; fhf3[i] = 0; } #pragma omp parallel for for (int i = 0; i < nbgrains; ++i) { const real xc = (g[i].x1 - Mgx) / dx; const real yc = (g[i].x2 - Mby) / dx; const real rbl0 = g[i].r / dx; const int xi = max(xc - rbl0, 1); const int xf = min(xc + rbl0, nx-2); const int yi = max(yc - rbl0, 1); const int yf = min(yc + rbl0, ny-2); for (int x = xi; x <= xf; ++x) { for (int y = yi; y <= yf; ++y) { if (i != obst[x][y]) continue; for (int iLB = 1; iLB < Q; ++iLB) { const int next_x = x + ex[iLB]; const int next_y = y + ey[iLB]; if (obst[next_x][next_y] != i) { const int halfq = (iLB <= half) ? half : -half; const real fnx = (f[x][y][iLB + halfq] + f[next_x][next_y][iLB]) * ex[iLB + halfq]; const real fny = (f[x][y][iLB + halfq] + f[next_x][next_y][iLB]) * ey[iLB + halfq]; fhf1[i] = fhf1[i] + fnx; fhf2[i] = fhf2[i] + fny; fhf3[i] = fhf3[i] - fnx * (y - (g[i].x2 - Mby) / dx) + fny * (x - (g[i].x1 - Mgx) / dx); } } } } } #pragma omp parallel for for (int i = 0; i < nbgrains; ++i) { fhf1[i] *= rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5)); fhf2[i] *= rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5)); fhf3[i] *= dx * rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5)); } } //********************************************************** void acceleration_grains() { long i, j; int jdep; real dn, ftest; real fn, ft; struct force fji; if (nbsteps % stepFilm == 0 && start == 1) { // Outfile MGPost // distance normale real xOiOj, yOiOj, OiOj; real xn, yn; real vn, vxOiOj, vyOiOj; real vt; for (i = 0; i <= nbgrains - 1; i++) { g[i].a1 = fhf1[i]; g[i].a2 = fhf2[i]; g[i].a3 = fhf3[i]; 
// (cont.) acceleration_grains, stepFilm branch: the grain-grain force law is inlined
// here (instead of calling force_grains) and iterates the Verlet neighbour list
// (cumul/neighbours). Accumulates equal-and-opposite forces on both partners.
} // Summation of forces on the grains for (i = 0; i <= nbgrains - 1; i++) { if (i == 0) jdep = 0; else jdep = cumul[i - 1]; for (j = jdep; j < cumul[i]; j++) { //* // fji=force_grains(i,neighbours[j]); //* // forces_grains xOiOj = g[i].x1 - g[neighbours[j]].x1; yOiOj = g[i].x2 - g[neighbours[j]].x2; OiOj = sqrt(xOiOj * xOiOj + yOiOj * yOiOj); dn = OiOj - g[i].r - g[neighbours[j]].r; if (dn >= 0) { fji.f1 = 0; fji.f2 = 0; fji.f3 = 0; } else { // relative normal velocity vxOiOj = g[i].v1 - g[neighbours[j]].v1; vyOiOj = g[i].v2 - g[neighbours[j]].v2; xn = xOiOj / OiOj; yn = yOiOj / OiOj; vn = vxOiOj * xn + vyOiOj * yn; vt = -vxOiOj * yn + vyOiOj * xn - g[i].v3 * g[i].r - g[neighbours[j]].v3 * g[neighbours[j]].r; fn = -kg * dn - nug * vn; if (fn < 0) fn = 0.0; ft = kt * vt * dt; ftest = mu * ft; if (fabs(ft) > ftest) { if (ft > 0.0) ft = ftest; else ft = -ftest; } //calculate the normal force fji.f1 = fn * xn - ft * yn; fji.f2 = fn * yn + ft * xn; fji.f3 = -ft * g[i].r * murf; g[i].p += fn; g[neighbours[j]].p += fn; g[i].s += ft; g[neighbours[j]].s += ft; g[i].slip += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt); g[neighbours[j]].slip += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt); g[i].rw += fabs(fji.f3) * (fabs(g[i].v3 * dt) + (fabs(fji.f3 - pff)) / kt); g[neighbours[j]].rw += fabs(fji.f3) * (fabs(g[i].v3 * dt) + (fabs(fji.f3 - pff)) / kt); g[i].z += 1; pff = fji.f3; pft = ft; // Stress computations g[i].M11 += fji.f1 * xOiOj; g[i].M12 += fji.f1 * yOiOj; g[i].M21 += fji.f2 * xOiOj; g[i].M22 += fji.f2 * yOiOj; } // end force_grains g[i].a1 = g[i].a1 + fji.f1; g[i].a2 = g[i].a2 + fji.f2; g[i].a3 = g[i].a3 + fji.f3; g[neighbours[j]].a1 = g[neighbours[j]].a1 - fji.f1; g[neighbours[j]].a2 = g[neighbours[j]].a2 - fji.f2; g[neighbours[j]].a3 = g[neighbours[j]].a3 + fji.f3; } } } else { // Calculate normal for (i = 0; i <= nbgrains - 1; i++) { g[i].a1 = fhf1[i]; g[i].a2 = fhf2[i]; g[i].a3 = fhf3[i]; } // summation of forces between the grains for (i = 0; i <= 
// NOTE(review): the inlined law above diverges from force_grains: it uses
// `ftest = mu * ft` (Coulomb cap should presumably be mu * fn), `ft = kt*vt*dt` (no
// leading minus), and the cap branches are sign-flipped (`ft > 0.0 -> ftest`). The
// |ft| > mu*ft comparison makes the cap a no-op for positive ft — confirm against
// force_grains (L259-L260) and fix consistently; not changed here byte-wise.
nbgrains - 1; i++) { // printf("cumul(%d)= %d\n",i,cumul[i]); if (i == 0) jdep = 0; else jdep = cumul[i - 1]; for (j = jdep; j < cumul[i]; j++) { // printf("grain(%d), neighbours(%d)= %d\n",i,i,neighbours[j]); fji = force_grains(i, neighbours[j]); g[i].a1 = g[i].a1 + fji.f1; g[i].a2 = g[i].a2 + fji.f2; g[i].a3 = g[i].a3 + fji.f3; g[neighbours[j]].a1 = g[neighbours[j]].a1 - fji.f1; g[neighbours[j]].a2 = g[neighbours[j]].a2 - fji.f2; g[neighbours[j]].a3 = g[neighbours[j]].a3 + fji.f3; } } } // Forces on the botton wall for (i = 0; i < nNeighWallb; i++) { dn = g[neighbourWallB[i]].x2 - g[neighbourWallB[i]].r - Mby; if (dn < 0) { fji = force_WallB(neighbourWallB[i], dn); g[neighbourWallB[i]].a1 = g[neighbourWallB[i]].a1 + fji.f1; g[neighbourWallB[i]].a2 = g[neighbourWallB[i]].a2 + fji.f2; g[neighbourWallB[i]].a3 = g[neighbourWallB[i]].a3 + fji.f3; g[neighbourWallB[i]].fr += fabs(fji.f1) * (fabs(dt * g[i].v1) + fabs(dt2 * g[i].a1) + (fabs(fji.f1 - pf)) / kt); // Friction work at the wall dt2*g[i].a1/2 pf = fji.f1; // Previous force fji.f1 } } // Forces on the Top Wall for (i = 0; i < nNeighWallt; i++) { dn = -g[neighbourWallT[i]].x2 - g[neighbourWallT[i]].r + Mhy; if (dn < 0) { fji = force_WallT(neighbourWallT[i], dn); g[neighbourWallT[i]].a1 = g[neighbourWallT[i]].a1 + fji.f1; g[neighbourWallT[i]].a2 = g[neighbourWallT[i]].a2 + fji.f2; g[neighbourWallT[i]].a3 = g[neighbourWallT[i]].a3 + fji.f3; } } // Forces on the Left Wall for (i = 0; i < nNeighWallL; i++) { dn = g[neighbourWallL[i]].x1 - g[neighbourWallL[i]].r - Mgx; if (dn < 0) { fji = force_WallL(neighbourWallL[i], dn); g[neighbourWallL[i]].a1 = g[neighbourWallL[i]].a1 + fji.f1; g[neighbourWallL[i]].a2 = g[neighbourWallL[i]].a2 + fji.f2; g[neighbourWallL[i]].a3 = g[neighbourWallL[i]].a3 + fji.f3; g[neighbourWallL[i]].fr += fabs(fji.f2) * (fabs(dt * g[i].v1) + fabs(dt2 * g[i].a1) + (fabs(fji.f2 - pf)) / kt); // Friction work at the wall pf = fji.f2; // Previous force fji.f1 } } // Forces on the right Wall for (i = 
// NOTE(review): in the bottom/left wall loops above, the friction-work terms read
// g[i].v1 / g[i].a1, but here `i` indexes the neighbour LIST, not a grain — this should
// almost certainly be g[neighbourWallB[i]] (resp. g[neighbourWallL[i]]); confirm.
// (cont.) right-wall loop, then Newton's second law: a1,a2 get buoyancy-corrected
// gravity ((m - mw)/m) * xG/yG; a3 is divided by the moment of inertia It.
// initVerlet (begins below) rebuilds the grain-pair Verlet list within distVerlet.
0; i < nNeighWallR; i++) { dn = -g[neighbourWallR[i]].x1 - g[neighbourWallR[i]].r + Mdx; if (dn < 0) { fji = force_WallR(neighbourWallR[i], dn); g[neighbourWallR[i]].a1 = g[neighbourWallR[i]].a1 + fji.f1; g[neighbourWallR[i]].a2 = g[neighbourWallR[i]].a2 + fji.f2; g[neighbourWallR[i]].a3 = g[neighbourWallR[i]].a3 + fji.f3; } } // calculate acceleration for (i = 0; i <= nbgrains - 1; i++) { g[i].a1 = g[i].a1 / g[i].m + ((g[i].m - g[i].mw) / g[i].m) * xG; g[i].a2 = (g[i].a2 / g[i].m) + ((g[i].m - g[i].mw) / g[i].m) * yG; g[i].a3 = g[i].a3 / g[i].It; } } //********************************************************************** void initVerlet() { int i, j; int jneighbours; real distx, disty; // real distVerlet=.1e-7; jneighbours = 0; for (i = 0; i < nbgrains; i++) { for (j = i + 1; j < nbgrains; j++) { distx = g[i].x1 - g[j].x1; disty = g[i].x2 - g[j].x2; if (((fabs(distx) - g[i].r - g[j].r) <= distVerlet) && ((fabs(disty) - g[i].r - g[j].r) <= distVerlet)) { if ((sqrt(distx * distx + disty * disty) - g[i].r - g[j].r) <= distVerlet) { neighbours[jneighbours] = j; jneighbours++; if (jneighbours == (nbgrains * 6 - 1)) printf("error! 
size of vector verlet neighbors is outdated"); } } cumul[i] = jneighbours; } // printf("cumul(%d)= %d\n",i,cumul[i]); } } void VerletWall() { int i; nNeighWallb = 0; nNeighWallL = 0; nNeighWallt = 0; nNeighWallR = 0; real dn; // real distVerlet=.1e-7; // Verlet WallB if (nbsteps * dt < dtt) { Mdx = 1.e-3 * lx / 10; Mhy = (1.e-3 * ly / 10); } else { Mdx = 1.e-3 * lx; Mhy = 1.e-3 * ly; } for (i = 0; i < nbgrains; ++i) { dn = g[i].x2 - g[i].r - Mby; if (dn < distVerlet) { neighbourWallB[nNeighWallb] = i; ++nNeighWallb; } } // Verlet WallT for (i = 0; i < nbgrains; ++i) { dn = -g[i].x2 - g[i].r + Mhy; if (dn < distVerlet) { neighbourWallT[nNeighWallt] = i; ++nNeighWallt; } } // Verlet WallL for (i = 0; i < nbgrains; ++i) { dn = g[i].x1 - g[i].r - Mgx; if (dn < distVerlet) { neighbourWallL[nNeighWallL] = i; ++nNeighWallL; } } // Verlet WallR for (i = 0; i < nbgrains; ++i) { dn = -g[i].x1 - g[i].r + Mdx; if (dn < distVerlet) { neighbourWallR[nNeighWallR] = i; ++nNeighWallR; } } } // ******************************************************************************************* // * writing obstacle arrays // * // ******************************************************************************************* void obst_writing() { int x, y, i; char filename1[] = "obst_LB.dat"; FILE* outfile1; outfile1 = fopen(filename1, "w"); for (y = 0; y < ly; y++) { for (x = 0; x < lx; x++) { fprintf(outfile1, "%d ", obst[x][y]); } fprintf(outfile1, "\n"); } fclose(outfile1); char filename2[] = "active_nodes.dat"; FILE* outfile2; outfile2 = fopen(filename2, "w"); for (y = 0; y < ly; y++) { for (x = 0; x < lx; x++) { fprintf(outfile2, "%d ", act[x][y]); } fprintf(outfile2, "\n"); } fclose(outfile2); char filename3[] = "links.dat"; FILE* outfile3; outfile3 = fopen(filename3, "w"); for (y = 0; y < ly; y++) { for (x = 0; x < lx; x++) { for (i = 1; i < Q; i++) { if (delta[x][y][i] != 0) { fprintf(outfile3, "%d %d %d %f\n", x, y, i, delta[x][y][i]); } } } } fclose(outfile3); } // 
**************************************************************************** // * Output of results to velocity files * // * Distribution of verlocity x - y * // **************************************************************************** void velocity_profile() { int x, y, i; real u_y, d_loc; real u_y1; char filename1[] = "yvel_vs_x.dat"; FILE* outfile1; outfile1 = fopen(filename1, "w"); fprintf(outfile1, "# vitesse u_x ordonnée \n"); y = (int)((g[0].x2 - Mby) / dx); for (x = 0; x < lx; x++) { if (obst[x][y] != -1 && obst[x][y] != nbgrains) u_y1 = g[obst[x][y]].v2 / c; else { u_y = 0; d_loc = 0.; for (i = 0; i < Q; i++) { d_loc = d_loc + f[x][y][i]; } for (i = 0; i < Q; i++) { u_y = u_y + f[x][y][i] * ey[i]; } u_y1 = u_y / d_loc; } fprintf(outfile1, "%d %.10lf\n", x, u_y1); } fclose(outfile1); } //************************************************************ // Calculate pressure * //************************************************************ void pressures() { int x, y; for (x = 0; x < lx; x++) { for (y = 0; y < ly; y++) { if (obst[x][y] == -1) { press[x][y] = (f[x][y][0] + f[x][y][1] + f[x][y][2] + f[x][y][3] + f[x][y][4] + f[x][y][5] + f[x][y][6] + f[x][y][7] + f[x][y][8] - rho_moy) * c_squ; } else press[x][y] = 0.; } } } //---------------------------------------------------------- void renderScene(void) { long i; if (start == 1) { if (vib == 1) { t = t + dt; // Mby=Mby+0.1*amp*sin(freq*t); Mgx = Mgx + amp * sin(freq * t); Mdx = Mdx + amp * sin(freq * t); } // FORCES FLUIDES !!! 
// (cont.) renderScene: one coupled time step, velocity-Verlet for the DEM side.
// Every npDEM sub-steps the LBM side runs (reinit densities, rebuild obstacles,
// collide+stream, momentum-exchange forces); the Verlet neighbour lists refresh every
// UpdateVerlet steps. Per-grain diagnostics (p, s, slip, rw, M11..M22, z, ...) are
// zeroed each step before positions/half-step velocities are advanced, then
// acceleration_grains supplies a(t+dt) and the second velocity half-kick completes.
// ("FORCES FLUIDES" = fluid forces.) Periodic outputs: write_DEM every stepStrob.
#ifdef _FLUIDE_ if (nbsteps % npDEM == 0) { reinit_obst_density(); obst_construction(); collision_streaming(); if (nbsteps % stepConsole == 0) check_density(); forces_fluid(lx, ly, f, nbgrains, g); } #endif if (nbsteps % UpdateVerlet == 0) { initVerlet(); VerletWall(); } /* for(i=0; i<=nbgrains_bas-1; i++) { g[i].a1=0.; g[i].a2=0.; g[i].a3=0.; } */ for (i = 0; i <= nbgrains - 1; i++) { g[i].p = 0; // reset pressure g[i].s = 0.; g[i].ifm = 0; g[i].f1 = 0.; g[i].f2 = 0.; g[i].ice = 0; g[i].fr = 0.; g[i].slip = 0; g[i].rw = 0.; ic = 0.; g[i].M11 = g[i].M12 = g[i].M21 = g[i].M22 = 0.; // Moments g[i].z = 0; // reset coordination numbers g[i].zz = 0; g[i].x1 = g[i].x1 + dt * g[i].v1 + dt2 * g[i].a1 / 2.; g[i].x2 = g[i].x2 + dt * g[i].v2 + dt2 * g[i].a2 / 2.; g[i].x3 = g[i].x3 + dt * g[i].v3 + dt2 * g[i].a3 / 2.; g[i].v1 = g[i].v1 + dt * g[i].a1 / 2.; g[i].v2 = g[i].v2 + dt * g[i].a2 / 2.; g[i].v3 = g[i].v3 + dt * g[i].a3 / 2.; } acceleration_grains(); for (i = 0; i <= nbgrains - 1; i++) { // g[i].p=g[i].p/(2.*M_PI*g[i].r); // pressure on grains g[i].v1 = g[i].v1 + dt * g[i].a1 / 2.; g[i].v2 = g[i].v2 + dt * g[i].a2 / 2.; g[i].v3 = g[i].v3 + dt * g[i].a3 / 2.; } nbsteps++; } if (nbsteps % stepFilm == 0 && start == 1) { #ifdef _FLUIDE_ // write_vtk(lx, ly, f, nbgrains, g); #endif nFile++; } if (nbsteps % stepStrob == 0 && start == 1) { write_DEM(); // write_forces(); } } ////////////////////////////////////////////////////////////////////////////// //////////////////////////////// MAIN ///////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { time_t time_raw_format; struct tm* ptr_time; printf("2D LBM-DEM code\n"); int i; real dtmax; if (argc != 2) { printf("usage: usage %s <filename>\n", argv[0]); exit(EXIT_FAILURE); } printf("Opening file : %s\n", argv[1]); c_squ = 1. 
// (cont.) main: read the grain sample, allocate all global arrays (distributions f,
// obstacle/activity maps, Verlet lists, per-grain fluid-force buffers), initialise the
// fluid at equilibrium, derive coupled time steps — dtLB from dx^2*(tau-0.5)/(3*nu),
// dtmax from the DEM contact stiffness, npDEM = sub-steps per LBM step — then run
// renderScene() until nbsteps*dt exceeds `duration`, logging energies periodically.
/ 3.; g = read_sample(argv[1]); check_sample(nbgrains, g); f = malloc(sizeof(real)*lx*ly*Q); assert(f); obst = malloc(sizeof(int)*lx*ly); assert(obst); act = malloc(sizeof(int)*lx*ly); assert(act); delta = malloc(sizeof(real)*lx*ly*Q); assert(delta); rLB = malloc(sizeof(real)*nbgrains); assert(rLB); cumul = malloc(sizeof(int)*nbgrains); assert(cumul); neighbours = malloc(sizeof(int)*nbgrains*6); assert(neighbours); neighbourWallB = malloc(sizeof(int)*nbgrains); assert(neighbourWallB); neighbourWallR = malloc(sizeof(int)*nbgrains); assert(neighbourWallR); neighbourWallL = malloc(sizeof(int)*nbgrains); assert(neighbourWallL); neighbourWallT = malloc(sizeof(int)*nbgrains); assert(neighbourWallT); memset(cumul, 0, sizeof(int)*nbgrains); memset(neighbours, 0, sizeof(int)*nbgrains*6); memset(neighbourWallB, 0, sizeof(int)*nbgrains); memset(neighbourWallR, 0, sizeof(int)*nbgrains); memset(neighbourWallL, 0, sizeof(int)*nbgrains); memset(neighbourWallT, 0, sizeof(int)*nbgrains); // c1 = malloc(sizeof(struct contact)*nbgrainsMax*nbgrainsMax); assert(c1); // c2 = malloc(sizeof(struct contact)*nbgrains); assert(c2); // memset(c1, 0, sizeof(struct contact)*nbgrains*nbgrains); // memset(c2, 0, sizeof(struct contact)*nbgrains); fhf = malloc(sizeof(struct force)*nbgrains); assert(fhf); fhf1 = malloc(sizeof(real)*nbgrains); assert(fhf1); fhf2 = malloc(sizeof(real)*nbgrains); assert(fhf2); fhf3 = malloc(sizeof(real)*nbgrains); assert(fhf3); init_density(lx, ly, f); Mgx = 0.; Mdx = 1.e-3 * lx / 10; Mhy = 1.e-3 * ly / 10; Mby = 0.; xG = -G * sin(angleG); yG = -G * cos(angleG); dx = (1./scale) * (Mdx - Mgx) / (lx - 1); printf("no space %le\n", dx); // Compute the time step for DEM real rMin = minimum_grain_radius(nbgrains, g); dtmax = (1 / iterDEM) * pi * rMin * sqrt(pi * rhoS / kg); dtLB = dx * dx * (tau - 0.5) / (3 * nu); npDEM = (dtLB / dtmax + 1); c = dx / dtLB; dt = dtLB / npDEM; dt2 = dt * dt; printf("dtLB=%le, dtmax=%le, dt=%le, npDEM=%d, c=%lf\n", dtLB, dtmax, dt, npDEM, c); 
for (i = 0; i <= nbgrains - 1; i++) { rLB[i] = reductionR * g[i].r / dx; } init_obst(); // VerletWall(); time(&time_raw_format); ptr_time = localtime(&time_raw_format); printf("Current local time and date: %s", asctime(ptr_time)); char filename_stats[] = "stats.data"; s_stats = fopen(filename_stats, "w"); fprintf(s_stats, "#1_t 2_xfront 3_xgrainmax 4_height 5_zmean 6_energie_x 7_energie_y " "8_energie_teta 9_energie_cin 10_N0 11_N1 12_N2 13_N3 14_N4 15_N5 " "16_energy_Potential 17_Strain_Energy 18_Frictional_Work " "19_Internal_Friction 20_Inelastic_Collision 21_Slip " "22_Rotational_Work\n"); fclose(s_stats); start = 1; do { renderScene(); time(&time_raw_format); ptr_time = localtime(&time_raw_format); if (nbsteps % UpdateVerlet == 0) printf( "steps %li steps %le KE %le PE %le SE %le WF %le INCE %le SLIP %le " "RW %le Time %s \n", nbsteps, nbsteps * dt, energie_cin, energy_p, SE, WF, INCE, TSLIP, TRW, asctime(ptr_time)); // IFR TSE TBW } while (nbsteps * dt <= duration); final_density(); time(&time_raw_format); ptr_time = localtime(&time_raw_format); printf("End local time and date: %s", asctime(ptr_time)); return 0; }
GB_binop__rminus_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rminus_uint64 // A.*B function (eWiseMult): GB_AemultB__rminus_uint64 // A*D function (colscale): GB_AxD__rminus_uint64 // D*A function (rowscale): GB_DxB__rminus_uint64 // C+=B function (dense accum): GB_Cdense_accumB__rminus_uint64 // C+=b function (dense accum): GB_Cdense_accumb__rminus_uint64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_uint64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_uint64 // C=scalar+B GB_bind1st__rminus_uint64 // C=scalar+B' GB_bind1st_tran__rminus_uint64 // C=A+scalar GB_bind2nd__rminus_uint64 // C=A'+scalar GB_bind2nd_tran__rminus_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C 
#define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (y - x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT64 || GxB_NO_RMINUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B where C, A and B are all dense; no error return (hence void).
void GB_Cdense_ewise3_accum__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // GrB_NO_VALUE tells the caller to fall back to the generic kernel
    // when this operator/type combination was compiled out.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // task partition of B (per-task first/last vector and start offsets):
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__rminus_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above always returns); emitted as-is by
    // the code generator for templates whose inner block may not return.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    // mappings from C's vectors to those of M, A, and B:
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__rminus_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t bij = Bx [p] ;
        // rminus with x bound first: cij = op(x,bij) = bij - x
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__rminus_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        // rminus with y bound second: cij = op(aij,y) = y - aij
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = (aij - x) ; \
}

GrB_Info GB_bind1st_tran__rminus_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = (y - aij) ; \
}

GrB_Info GB_bind2nd_tran__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
header.h
/*--------------------------------------------------------------------
c---------------------------------------------------------------------
c
c  header.h
c
c---------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c The following include file is generated automatically by the
c "setparams" utility. It defines
c maxcells: the square root of the maximum number of processors
c problem_size: 12, 64, 102, 162 (for class T, A, B, C)
c dt_default: default time step for this problem size if no
c config file
c niter_default: default number of iterations for this problem size
--------------------------------------------------------------------*/

#ifndef _HEADER_H_
#define _HEADER_H_

#include "npbparams.h"
#include "../math/nas_math.h"

#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5

/* NOTE(review): all of the data below is declared `static` in a header, so
   every translation unit that includes this file gets its OWN copy of these
   (large) arrays.  This is the NPB C style, which assumes the header is
   included by a single compilation unit — confirm before reusing. */

/* COMMON block: global */
static int grid_points[3];      /* grid_ponts(1:3) */

/* COMMON block: constants — discretization coefficients set at startup */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp, dt;
static double ce[5][13];        /* ce(5,13) */
static double dxmax, dymax, dzmax;
static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
static double yycon1, yycon2, yycon3, yycon4, yycon5;
static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345;
static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp;
static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2;
static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6;
static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;

#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE

/*
c to improve cache performance, grid dimensions padded by 1
c for even number sizes only.
*/

/* COMMON block: fields — the N/2*2+1 expressions implement the padding
   trick described above (adds one element for even PROBLEM_SIZE) */
static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];

/* COMMON block: work_1d — per-thread scratch vectors (see threadprivate) */
double cuf[PROBLEM_SIZE];
double q[PROBLEM_SIZE];
double ue[PROBLEM_SIZE][5];
double buf[PROBLEM_SIZE][5];
/* each OpenMP thread gets a private copy of these work arrays */
#pragma omp threadprivate(cuf, q, ue, buf)

/*
c to improve cache performance, grid dimensions (first two for these
c to arrays) padded by 1 for even number sizes only.
*/

/* COMMON block: work_lhs — Jacobian work arrays */
static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double tmp1, tmp2, tmp3;

#endif
jacobi-omp.c
/* * Copyright 2013 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <string.h> #include <openacc.h> #include "timer.h" #define NN 4096 #define NM 4096 double A[NN][NM]; double Anew[NN][NM]; int main(int argc, char** argv) { const int n = NN; const int m = NM; const int iter_max = 1000; const double tol = 1.0e-6; double error = 1.0; memset(A, 0, n * m * sizeof(double)); memset(Anew, 0, n * m * sizeof(double)); for (int j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; while ( error > tol && iter < iter_max ) { error = 0.0; #pragma omp parallel for shared(m, n, Anew, A) for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmax( error, fabs(Anew[j][i] - A[j][i])); } } #pragma omp parallel for shared(m, n, Anew, A) for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000); }
GB_binop.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this is the Generator *template*.  Tokens such as GB_ctype,
// GB_atype, GB_binaryop, if_C_dense_update, endif_..., etc. are placeholders
// substituted by the code generator; the file is intentionally not
// compilable C as written.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB)
// A.*B function (eWiseMult):       GB (_AemultB_08)
// A.*B function (eWiseMult):       GB (_AemultB_02)
// A.*B function (eWiseMult):       GB (_AemultB_04)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap)
// A*D function (colscale):         GB (_AxD)
// D*A function (rowscale):         GB (_DxB)
// C+=B function (dense accum):     GB (_Cdense_accumB)
// C+=b function (dense accum):     GB (_Cdense_accumb)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum)
// C=scalar+B                       GB (_bind1st)
// C=scalar+B'                      GB (_bind1st_tran)
// C=A+scalar                       GB (_bind2nd)
// C=A'+scalar                      GB (_bind2nd_tran)

// C type:     GB_ctype
// A type:     GB_atype
// A pattern?  GB_a_is_pattern
// B type:     GB_btype
// B pattern?  GB_b_is_pattern

// BinaryOp:   GB_binaryop(cij,aij,bij,i,j)

#define GB_ATYPE \
    GB_atype

#define GB_BTYPE \
    GB_btype

#define GB_CTYPE \
    GB_ctype

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    GB_atype_is_btype

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    GB_ctype_is_atype

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    GB_ctype_is_btype

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GB_geta(aij,Ax,pA,A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    GB_a_is_pattern \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GB_getb(bij,Bx,pB,B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    GB_b_is_pattern \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GB_ctype t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    GB_copy_a_to_c(cij,Ax,pA,A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    GB_copy_b_to_c(cij,Bx,pB,B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    GB_binaryop(z,x,y,i,j) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    GB_binaryop_flip

// op is second
#define GB_OP_IS_SECOND \
    GB_op_is_second

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    GB_disable

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

if_is_binop_subset

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

endif_is_binop_subset

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if_C_dense_update
    {
        #include "GB_dense_subassign_23_template.c"
    }
    endif_C_dense_update
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if_C_dense_update
    {
        // get the scalar b for C += b, of type GB_btype
        GB_btype bwork = (*((GB_btype *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    endif_C_dense_update
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

if_binop_is_semiring_multiplier

GrB_Info GB (_AxD)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_ctype *restrict Cx = (GB_ctype *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_is_semiring_multiplier

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

if_binop_is_semiring_multiplier

GrB_Info GB (_DxB)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_ctype *restrict Cx = (GB_ctype *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_is_semiring_multiplier

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GB_atype alpha_scalar ;
    GB_btype beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta for entries missing in A/B
        alpha_scalar = (*((GB_atype *) alpha_scalar_in)) ;
        beta_scalar  = (*((GB_btype *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

if_binop_emult_is_enabled

GrB_Info GB (_AemultB_08)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_emult_is_enabled

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

if_binop_emult_is_enabled

GrB_Info GB (_AemultB_02)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_emult_is_enabled

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

if_binop_emult_is_enabled

GrB_Info GB (_AemultB_04)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_emult_is_enabled

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

if_binop_emult_is_enabled

GrB_Info GB (_AemultB_bitmap)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_emult_is_enabled

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

if_binop_bind_is_enabled

GrB_Info GB (_bind1st)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B not bitmap)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_ctype *Cx = (GB_ctype *) Cx_output ;
    GB_atype x = (*((GB_atype *) x_input)) ;
    GB_btype *Bx = (GB_btype *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GB_getb(bij, Bx, p, false) ;
        GB_binaryop(Cx [p], x, bij, 0, 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_bind_is_enabled

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

if_binop_bind_is_enabled

GrB_Info GB (_bind2nd)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A not bitmap)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GB_ctype *Cx = (GB_ctype *) Cx_output ;
    GB_atype *Ax = (GB_atype *) Ax_input ;
    GB_btype y = (*((GB_btype *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GB_geta(aij, Ax, p, false) ;
        GB_binaryop(Cx [p], aij, y, 0, 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_bind_is_enabled

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

if_binop_bind_is_enabled

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    GB_getb(aij, Ax, pA, false) ; \
    GB_binaryop(Cx [pC], x, aij, 0, 0) ; \
}

GrB_Info GB (_bind1st_tran)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    GB_btype
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_atype x = (*((const GB_atype *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    GB_atype
}

endif_binop_bind_is_enabled

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

if_binop_bind_is_enabled

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    GB_geta(aij, Ax, pA, false) ; \
    GB_binaryop(Cx [pC], aij, y, 0, 0) ; \
}

GrB_Info GB (_bind2nd_tran)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_btype y = (*((const GB_btype *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

endif_binop_bind_is_enabled

#endif
nbody-soa.c
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #define SOFTENING 1e-9f typedef struct { float *x, *y, *z, *vx, *vy, *vz; } BodySystem; void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } void bodyForce(BodySystem p, float dt, int n) { #pragma omp parallel for schedule(dynamic) for (int i = 0; i < n; i++) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for (int j = 0; j < n; j++) { float dy = p.y[j] - p.y[i]; float dz = p.z[j] - p.z[i]; float dx = p.x[j] - p.x[i]; float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING; float invDist = 1.0f / sqrtf(distSqr); float invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } p.vx[i] += dt*Fx; p.vy[i] += dt*Fy; p.vz[i] += dt*Fz; } } int main(const int argc, const char** argv) { int nBodies = 30000; if (argc > 1) nBodies = atoi(argv[1]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations int bytes = 6*nBodies*sizeof(float); float *buf = (float*)malloc(bytes); BodySystem p; p.x = buf+0*nBodies; p.y = buf+1*nBodies; p.z = buf+2*nBodies; p.vx = buf+3*nBodies; p.vy = buf+4*nBodies; p.vz = buf+5*nBodies; randomizeBodies(buf, 6*nBodies); // Init pos / vel data double totalTime = 0.0; for (int iter = 1; iter <= nIters; iter++) { StartTimer(); bodyForce(p, dt, nBodies); // compute interbody forces for (int i = 0 ; i < nBodies; i++) { // integrate position p.x[i] += p.vx[i]*dt; p.y[i] += p.vy[i]*dt; p.z[i] += p.vz[i]*dt; } const double tElapsed = GetTimer() / 1000.0; if (iter > 1) { // First iter is warm up totalTime += tElapsed; } #ifndef SHMOO printf("Iteration %d: %.3f seconds\n", iter, tElapsed); #endif } double avgTime = totalTime / (double)(nIters-1); #ifdef SHMOO printf("%d, %0.3f\n", nBodies, 1e-9 * nBodies * nBodies / avgTime); #else printf("Average rate for iterations 2 through %d: %.3f +- %.3f steps per second.\n", nIters, rate); 
printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, 1e-9 * nBodies * nBodies / avgTime); #endif free(buf); }
pslangb.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzlangb.c, normal z -> s, Fri Sep 28 17:38:12 2018
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#include "core_lapack.h"

/* Address of tile (m, n) of the band matrix A, viewed as float*. */
#define A(m, n) (float*)plasma_tile_addr(A, m, n)

/***************************************************************************//**
 * Parallel tile calculation of max, one, infinity or Frobenius matrix norm
 * for a general band matrix.
 *
 * @param[in]  norm     which norm: PlasmaMaxNorm, PlasmaOneNorm,
 *                      PlasmaInfNorm, or PlasmaFrobeniusNorm
 * @param[in]  A        band matrix descriptor (bandwidths A.kl, A.ku)
 * @param[out] work     caller-provided scratch; layout and required size
 *                      depend on the norm (per-tile partial results plus a
 *                      reduction workspace)
 * @param[out] value    the computed norm (single float)
 * @param[in,out] sequence,request  PLASMA async error tracking
 *
 * In every case only tiles intersecting the band contribute: for tile
 * column n, the row-tile range is
 *   m_start = max(0, n*nb - ku) / nb  ..  m_end = min(m-1, (n+1)*nb + kl - 1) / nb.
 * Per-tile partial results are produced by OpenMP tasks, then reduced
 * after the taskwait.
 ******************************************************************************/
void plasma_pslangb(plasma_enum_t norm, plasma_desc_t A,
                     float *work, float *value,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    float stub;                 // placeholder for unused norm outputs
    int wcnt = 0;               // count of per-tile partial results (max norm)
    int ldwork, klt, kut;
    float *workspace, *scale, *sumsq;
    switch (norm) {
    //================
    // PlasmaMaxNorm
    //================
    case PlasmaMaxNorm:
        // One scalar max per band tile, gathered contiguously in work[],
        // then reduced with a final 1 x wcnt max-norm.
        wcnt = 0;
        for (int n = 0; n < A.nt; n++ ) {
            int nvan = plasma_tile_nview(A, n);
            int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
            int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
            for (int m = m_start; m <= m_end; m++ ) {
                int ldam = plasma_tile_mmain_band(A, m, n);
                int mvam = plasma_tile_mview(A, m);
                plasma_core_omp_slange(PlasmaMaxNorm,
                                       mvam, nvan,
                                       A(m, n), ldam,
                                       &stub, &work[wcnt],
                                       sequence, request);
                wcnt++;
            }
        }
        #pragma omp taskwait
        plasma_core_omp_slange(PlasmaMaxNorm,
                               1, wcnt,
                               work, 1,
                               &stub, value,
                               sequence, request);
        break;
    //================
    // PlasmaOneNorm
    //================
    case PlasmaOneNorm:
        // # of tiles in upper band (not including diagonal)
        kut = (A.ku+A.nb-1)/A.nb;
        // # of tiles in lower band (not including diagonal)
        klt = (A.kl+A.nb-1)/A.nb;
        ldwork = kut+klt+1;     // band height in tiles
        for (int n = 0; n < A.nt; n++ ) {
            int nvan = plasma_tile_nview(A, n);
            int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
            int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
            for (int m = m_start; m <= m_end; m++ ) {
                int ldam = plasma_tile_mmain_band(A, m, n);
                int mvam = plasma_tile_mview(A, m);
                // partial column sums for tile (m,n); row (m-m_start) of a
                // ldwork x A.n work array stored with stride A.n
                plasma_core_omp_slange_aux(PlasmaOneNorm,
                                           mvam, nvan,
                                           A(m,n), ldam,
                                           &work[(m-m_start)*A.n+n*A.nb],
                                           sequence, request);
            }
        }
        #pragma omp taskwait
        workspace = &work[A.n*ldwork];
        // reduce: max over columns of the accumulated column sums
        plasma_core_omp_slange(PlasmaInfNorm,
                               A.n, ldwork,
                               work, A.n,
                               workspace, value,
                               sequence, request);
        break;
    //================
    // PlasmaInfNorm
    //================
    case PlasmaInfNorm:
        ldwork = A.mb*A.mt;
        for (int n = 0; n < A.nt; n++ ) {
            int nvan = plasma_tile_nview(A, n);
            int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
            int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
            for (int m = m_start; m <= m_end; m++ ) {
                int ldam = plasma_tile_mmain_band(A, m, n);
                int mvam = plasma_tile_mview(A, m);
                // partial row sums for tile (m,n), one column of work per
                // tile column n
                plasma_core_omp_slange_aux(PlasmaInfNorm,
                                           mvam, nvan,
                                           A(m,n), ldam,
                                           &work[m*A.mb+n*ldwork],
                                           sequence, request);
            }
        }
        #pragma omp taskwait
        //nwork = A.nt;
        workspace = &work[ldwork*A.nt];
        // reduce: max over rows of the accumulated row sums
        plasma_core_omp_slange(PlasmaInfNorm,
                               ldwork, A.nt,
                               work, ldwork,
                               workspace, value,
                               sequence, request);
        break;
    //======================
    // PlasmaFrobeniusNorm
    //======================
    case PlasmaFrobeniusNorm:
        kut  = (A.ku+A.nb-1)/A.nb; // # of tiles in upper band (not including diagonal)
        klt  = (A.kl+A.nb-1)/A.nb; // # of tiles in lower band (not including diagonal)
        ldwork = kut+klt+1;
        // scaled sum-of-squares pairs (scale, sumsq) per band tile
        scale = work;
        sumsq = &work[ldwork*A.nt];
        for (int n = 0; n < A.nt; n++ ) {
            int nvan = plasma_tile_nview(A, n);
            int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
            int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
            for (int m = m_start; m <= m_end; m++ ) {
                int ldam = plasma_tile_mmain_band(A, m, n);
                int mvam = plasma_tile_mview(A, m);
                plasma_core_omp_sgessq(mvam, nvan,
                                       A(m,n), ldam,
                                       &scale[n*ldwork+m-m_start],
                                       &sumsq[n*ldwork+m-m_start],
                                       sequence, request);
            }
        }
        #pragma omp taskwait
        // combine all (scale, sumsq) pairs into the final norm
        plasma_core_omp_sgessq_aux(ldwork*A.nt, scale, sumsq,
                                   value,
                                   sequence, request);
        break;
    default:
        assert(0);
    }
}
critical.c
/* PMSIS includes */
#include "pmsis.h"
#include "omp.h"

#define ARRAY_SIZE 512

/* Shared work arrays, accessed by all cluster cores. */
uint32_t a[ARRAY_SIZE] = {0};
uint32_t b[ARRAY_SIZE] = {0};
uint32_t c[ARRAY_SIZE] = {0};

/* Cluster main entry, executed by core 0. */
void cluster_delegate(void *arg)
{
    printf("Cluster master core entry\n");
    #pragma omp parallel
    {
        printf("[%d %d] Fork entry\n", pi_cluster_id(), omp_get_thread_num() );
        /* Phase 1: initialize a[] and b[], iterations split across threads. */
        #pragma omp for
        for (int i=0; i<ARRAY_SIZE; i++)
        {
            a[i] = 2 * i;
            b[i] = 3 * i;
        }
        /* Busy-wait whose length grows with the thread id, staggering the
           threads' arrival at the barrier below (demo of synchronization). */
        for(volatile int i = 0; i < (10000 << omp_get_thread_num()); i++);
        #pragma omp barrier
        /* Phase 2: c[i] = a[i] + b[i] = 5*i, again work-shared. */
        #pragma omp for
        for (int i=0; i<ARRAY_SIZE; i++)
        {
            c[i] = a[i] + b[i];
            printf("[%d %d] c[%d]: %d\n", pi_cluster_id(), omp_get_thread_num(), i, c[i]);
        }
        #pragma omp barrier
        /* Phase 3: each thread enters the critical section once; because
           c[i] += i mutates the shared array inside it, each successive
           thread sums a further-incremented c[] and prints a larger total.
           NOTE(review): this appears intentional (it makes the serialized
           ordering observable) — confirm against the example's docs. */
        #pragma omp critical
        {
            uint32_t sum = 0;
            for (int i=0; i<ARRAY_SIZE; i++)
            {
                sum += c[i];
                c[i] += i;
            }
            printf("Core sum %d: %d\n", pi_core_id(), sum);
        }
    }
    printf("Cluster master core exit\n");
}

/* Fabric-controller entry: opens the cluster, runs cluster_delegate on it,
   then closes the cluster and exits through pmsis_exit(). */
void helloworld(void)
{
    printf("Entering main controller\n");
    uint32_t errors = 0;
    uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
    printf("[%d %d] Hello World!\n", cluster_id, core_id);

    struct pi_device cluster_dev;
    struct pi_cluster_conf cl_conf;

    /* Init cluster configuration structure. */
    pi_cluster_conf_init(&cl_conf);
    cl_conf.id = 0;                /* Set cluster ID. */
    /* Configure & open cluster. */
    pi_open_from_conf(&cluster_dev, &cl_conf);
    if (pi_cluster_open(&cluster_dev))
    {
        printf("Cluster open failed !\n");
        pmsis_exit(-1);
    }

    /* Prepare cluster task and send it to cluster. */
    struct pi_cluster_task cl_task;

    /* Blocks until cluster_delegate has finished on the cluster. */
    pi_cluster_send_task_to_cl(&cluster_dev, pi_cluster_task(&cl_task, cluster_delegate, NULL));

    pi_cluster_close(&cluster_dev);

    printf("Test success !\n");

    pmsis_exit(errors);
}

/* Program Entry. */
int main(void)
{
    printf("\n\n\t *** PMSIS HelloWorld ***\n\n");
    return pmsis_kickoff((void *) helloworld);
}
ts_separate_clu.c
#include "data_structures.h"

// ISO stencils, spatial blocking only.
// Tiles the Y range in chunks of stencil_ctx.bs_y; inside each tile an OpenMP
// team of thread_group_size threads sweeps the Z range (schedule(static,1)).
// The actual inner X-loop kernel is supplied via stencil_ctx.clu_func.
void iso_ref_split(const int shape[3], const int xb, const int yb, const int zb,
    const int xe, const int ye, const int ze,
    const real_t * restrict coef, real_t * restrict u,
    const real_t * restrict v, const real_t * restrict roc2,
    stencil_CTX stencil_ctx)
{
  int j, k, jb, je;

  CLU_CTX clu_ctx;
  clu_ctx.nnx = shape[0];
  clu_ctx.nny = shape[1];
  clu_ctx.nnz = shape[2];
  clu_ctx.ln_domain = ((uint64_t) 1)*shape[0]*shape[1]*shape[2];

  for (jb = yb; jb < ye; jb += stencil_ctx.bs_y) // blocking in Y
  {
    je = (jb+stencil_ctx.bs_y) < (ye) ? (jb+stencil_ctx.bs_y) : (ye);
    // NOTE: a fresh team is forked for every Y block.
    #pragma omp parallel num_threads(stencil_ctx.thread_group_size)
    {
      #pragma omp for private(k,j) schedule(static,1)
      for (k = zb; k < ze; k++) {
        for (j = jb; j < je; j++) {
          stencil_ctx.clu_func(clu_ctx, xb, xe, j, k, coef, u, v, roc2);
        }
      }
    }
  }
}

// 1WD kernel: single-threaded wavefront diamond tiling.
// Blocks X in bs_x chunks, marches Z in wavefronts of num_wf planes, and for
// each wavefront performs time_blk = 2*t_dim+1 time steps, swapping the u/v
// buffers between odd and even t. The Y extent widens by b_inc/e_inc while
// t < t_dim (lower half of the diamond) and narrows afterwards; the X bounds
// and the Z base kt shift back by NHALO per time step unless clamped at the
// domain edge (ib==xb / ie==xe).
void swd_iso_ref_split(const int shape[3], const int xb, const int yb_r, const int zb,
    const int xe, const int ye_r, const int ze,
    const real_t * restrict coef, real_t * restrict u, real_t * restrict v,
    const real_t * restrict roc2, int t_dim, int b_inc, int e_inc, int NHALO,
    stencil_CTX stencil_ctx, int mtid)
{
  int i, j, k, t, yb, ye, zi, kt, ib, ie, ib_r, ie_r, bs_x;
  int nny = shape[1];
  int nnx = shape[0];
  int nwf = stencil_ctx.num_wf;
  int time_blk = t_dim*2+1; // temporal block size

  CLU_CTX clu_ctx;
  clu_ctx.nnx = shape[0];
  clu_ctx.nny = shape[1];
  clu_ctx.nnz = shape[2];
  clu_ctx.ln_domain = ((uint64_t) 1)*shape[0]*shape[1]*shape[2];

  if (zb+nwf >= ze) nwf = ze-zb;
  bs_x = stencil_ctx.bs_x;

  for (ib_r = xb; ib_r < xe; ib_r += bs_x) { // blocking in X
    ie_r = (ib_r+bs_x) < (xe) ? (ib_r+bs_x) : (xe);
    //printf("bs_x:%d xb:%d xe:%d ib:%d ie:%d\n", bs_x, xb, xe, ib, ie);

    for (zi = zb; zi < ze; zi += nwf) { // wavefront loop
      if (zi+nwf >= ze) nwf = ze-zi;   // clip the last (partial) wavefront

      yb = yb_r;
      ye = ye_r;
      ib = ib_r;
      ie = ie_r;
      kt = zi;

      for (t = 0; t < time_blk; t++) {
        // Odd time steps read u / write v; even steps read v / write u.
        if ((t)%2 == 1) {
          for (k = kt; k < nwf+kt; k++) {
            for (j = yb; j < ye; j++) {
              stencil_ctx.clu_func(clu_ctx, ib, ie, j, k, coef, u, v, roc2);
            }
          }
        } else {
          for (k = kt; k < nwf+kt; k++) {
            for (j = yb; j < ye; j++) {
              stencil_ctx.clu_func(clu_ctx, ib, ie, j, k, coef, v, u, roc2);
            }
          }
        }

        // Update block size in Y
        if (t < t_dim) { // inverted trapezoid (or lower half of the diamond)
          yb -= b_inc;
          ye += e_inc;
        } else {         // trapezoid (or upper half of the diamond)
          yb += b_inc;
          ye -= e_inc;
        }

        // Update block size in X
        if (ib != xb) ib -= NHALO;
        if (ie != xe) ie -= NHALO;

        kt -= NHALO;
      } // time loop
    } // wavefront loop
  } // blocking in x
}

// MWD kernel: multi-threaded variant of the same diamond scheme.
// One OpenMP team of tgs = thread_group_size threads shares each wavefront:
// thread tid handles Z planes [kt+th_nwf*tid, kt+th_nwf*(tid+1)). When fewer
// than nwf planes remain, the leftover planes are re-divided among threads
// (q each, the first r threads get one extra). The time each thread spends in
// the end-of-step barrier is accumulated into stencil_ctx.t_wait[gtid].
// mtid is the caller's thread-group id, used only to compute gtid.
void mwd_iso_ref_split(const int shape[3], const int xb, const int yb_r, const int zb,
    const int xe, const int ye_r, const int ze,
    const real_t * restrict coef, real_t * restrict u, real_t * restrict v,
    const real_t * restrict roc2, int t_dim, int b_inc, int e_inc, int NHALO,
    stencil_CTX stencil_ctx, int mtid)
{
  double t_start;
  int i, j, k, t, zi, kt, yb, ye, tid, not_done, gtid, thb, the, q, r, ib, ie, ib_r, ie_r;
  int nny = shape[1];
  int nnx = shape[0];
  int nwf = stencil_ctx.num_wf;
  int time_blk = t_dim*2+1; // temporal block size
  int tgs = stencil_ctx.thread_group_size;
  int th_nwf = nwf/tgs;     // Z planes per thread (assumes nwf % tgs == 0 — TODO confirm with callers)
  int bs_x = stencil_ctx.bs_x;

  // BUG FIX: t_start was previously listed in shared(...). Every thread
  // writes it via MPI_Wtime() immediately before the barrier and reads it
  // after, so sharing it raced and corrupted the per-thread t_wait[gtid]
  // accounting. It must be private.
#pragma omp parallel private(ib_r, ie_r, ib, ie, zi, kt, yb, ye, t, q, r, thb, the, tid, gtid, i, j, k, not_done, t_start) shared(bs_x, tgs, nwf, th_nwf, zb, ze, yb_r, ye_r, mtid, xb, xe, time_blk, t_dim, b_inc, e_inc) num_threads(stencil_ctx.thread_group_size)
  {
    tid = 0;
    gtid = 0;
#if defined(_OPENMP)
    tid = omp_get_thread_num();
    gtid = tid + mtid * tgs;
#endif

    CLU_CTX clu_ctx;
    clu_ctx.nnx = shape[0];
    clu_ctx.nny = shape[1];
    clu_ctx.nnz = shape[2];
    clu_ctx.ln_domain = ((uint64_t) 1)*shape[0]*shape[1]*shape[2];

    ib_r = xb;
    while (ib_r < xe) { // blocking in X
      ie_r = (ib_r+bs_x) < (xe) ? (ib_r+bs_x) : (xe);

      ib = ib_r;
      ie = ie_r;
      not_done = 1;
      zi = zb;
      kt = zb;
      t = 0;
      yb = yb_r;
      ye = ye_r;
      thb = th_nwf*tid;       // this thread's slice of the wavefront
      the = th_nwf*(tid+1);

      while (not_done) { // wavefront loop
        // Odd time steps read u / write v; even steps read v / write u.
        if ((t)%2 == 1) {
          for (k = kt+thb; k < kt+the; k++) {
            for (j = yb; j < ye; j++) {
              stencil_ctx.clu_func(clu_ctx, ib, ie, j, k, coef, u, v, roc2);
            }
          }
        } else {
          for (k = kt+thb; k < kt+the; k++) {
            for (j = yb; j < ye; j++) {
              stencil_ctx.clu_func(clu_ctx, ib, ie, j, k, coef, v, u, roc2);
            }
          }
        }

        if (t+1 < time_blk) {
          // Update block size in Y
          if (t < t_dim) { // inverted trapezoid (or lower half of the diamond)
            yb -= b_inc;
            ye += e_inc;
          } else {         // trapezoid (or upper half of the diamond)
            yb += b_inc;
            ye -= e_inc;
          }
          kt -= NHALO;
          t++;
          // Update block size in X
          if (ib != xb) ib -= NHALO;
          if (ie != xe) ie -= NHALO;
        } else {
          // Diamond done: reset and advance to the next wavefront.
          t = 0;
          yb = yb_r;
          ye = ye_r;
          ib = ib_r;
          ie = ie_r;
          zi += nwf;
          kt = zi;
          if (zi >= ze) not_done = 0;
        }

        // Reassign the wavefronts to cores if a fraction of the MW remains.
        if (((ze-zi) < nwf) & (t == 0)) {
          q = (int)((ze-zi)/tgs);
          r = (ze-zi)%tgs;
          if (tid < r) {
            thb = tid * (q+1);
            the = thb + (q+1);
          } else {
            thb = r * (q+1) + (tid - r) * q;
            the = thb + q;
          }
        }

        // Measure how long this thread waits at the team barrier.
        t_start = MPI_Wtime();
#pragma omp barrier
        stencil_ctx.t_wait[gtid] += MPI_Wtime() - t_start;
      } // z loop (wavefront)

      // move to next block in X
      ib_r += bs_x;
    } // blocking in X loop
  } // parallel region
}
GB_binop__rminus_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint64) // A*D function (colscale): GB (_AxD__rminus_uint64) // D*A function (rowscale): GB (_DxB__rminus_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint64) // C=scalar+B GB (_bind1st__rminus_uint64) // C=scalar+B' GB (_bind1st_tran__rminus_uint64) // C=A+scalar GB (_bind2nd__rminus_uint64) // C=A'+scalar GB (_bind2nd_tran__rminus_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT64 || GxB_NO_RMINUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB 
(_AemultB_08__rminus_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
PAddOP.h
#ifndef PAddOP #define PAddOP /* * PAddOP.h: * (pointwise) add * * Created on: June 13, 2017 * Author: mszhang */ //#include "Eigen/Dense" #include "MyLib.h" #include "Node.h" #include "Graph.h" class PAddNode : public Node { public: vector<PNode> ins; vector<LDG::PTensor> vec_in_val; vector<LDG::PTensor> vec_ins_loss; vector<LDG::PTensor> vec_loss; ~PAddNode() { ins.clear(); } public: PAddNode() : Node() { ins.clear(); node_type = "point-add"; } inline void clearValue() { ins.clear(); Node::clearValue(); } public: void forward(Graph *cg, const vector<PNode>& x) { if (x.size() == 0) { std::cout << "empty inputs for add" << std::endl; return; } ins.clear(); for (int i = 0; i < x.size(); i++) { int dim0 = x[i]->val.shape().dims()[0]; if (dim0 == dim) { ins.push_back(x[i]); } else { std::cout << "dim does not match" << std::endl; } } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << 
std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } if (x5->dim == dim) { ins.push_back(x5); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5, PNode x6) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } if (x5->dim == dim) { ins.push_back(x5); } else { std::cout << "dim does not match" << std::endl; } if (x6->dim == dim) { ins.push_back(x6); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } public: /* inline void compute() { int nSize 
= ins.size(); DEV->zero(val); //val.zero(); vec_in_val.resize(nSize); for (int i = 0; i < nSize; ++i) { vec_in_val[i] = (&ins[i]->val); //DEV->Fadd(val, ins[i]->val, val); //for (int idx = 0; idx < dim; idx++) { //val[idx] += ins[i]->val[idx]; //} } DEV->Fadd_inplace(val, vec_in_val); } void backward() { int nSize = ins.size(); vec_ins_loss.resize(nSize); vec_loss.resize(nSize); for (int i = 0; i < nSize; ++i) { vec_ins_loss[i] = (&ins[i]->loss); vec_loss[i] = (&loss); //DEV->Fadd(ins[i]->loss, loss, ins[i]->loss); //for (int idx = 0; idx < dim; idx++) { //ins[i]->loss[idx] += loss[idx]; //} } DEV->Fadd_inplace(vec_ins_loss, vec_loss); } */ public: inline PExecute generate(bool bTrain); // better to rewrite for deep understanding inline bool typeEqual(PNode other) { return Node::typeEqual(other); } }; //#if USE_GPU //class PAddExecute : public Execute { //public: // bool bTrain; //public: // inline void forward() { // int count = batch.size(); // // for (int idx = 0; idx < count; idx++) { // PAddNode* ptr = (PAddNode*)batch[idx]; // ptr->compute(); // ptr->forward_drop(bTrain); // } // } // // inline void backward() { // int count = batch.size(); // for (int idx = 0; idx < count; idx++) { // PAddNode* ptr = (PAddNode*)batch[idx]; // ptr->backward_drop(); // ptr->backward(); // } // } //}; // // //inline PExecute PAddNode::generate(bool bTrain) { // PAddExecute* exec = new PAddExecute(); // exec->batch.push_back(this); // exec->bTrain = bTrain; // return exec; //} //#else class PAddExecute : public Execute { public: bool bTrain; public: inline void forward() { int count = batch.size(); //#pragma omp parallel for schedule(static,1) vector<vector<LDG::PTensor> > vec_vec_x; vector<LDG::PTensor> vec_val; PAddNode* ptr = (PAddNode*)batch[0]; drop_value = ptr->drop_value; for (int idx = 0; idx < count; idx++) { PAddNode* ptr = (PAddNode*)batch[idx]; vector<LDG::PTensor> vec_x; int n = ptr->ins.size(); for(int idy = 0; idy < n; idy++) vec_x.push_back(&ptr->ins[idy]->val); 
vec_vec_x.push_back(vec_x); vec_val.push_back(&ptr->val); ptr->degree = -1; //ptr->compute(); } DEV->Fadd(vec_vec_x, vec_val); if(drop_value > 0) { if(bTrain) DEV->Fdropout(vec_val, drop_value, mask, vec_val); else DEV->Fdropout(vec_val, drop_value, vec_val); } /* for (int idx = 0; idx < count; idx++) { PAddNode* ptr = (PAddNode*)batch[idx]; //ptr->compute(); ptr->forward_drop(bTrain); } */ } inline void backward() { int count = batch.size(); //#pragma omp parallel for schedule(static,1) vector<LDG::PTensor> vec_loss; vector<vector<LDG::PTensor> > vec_vec_in_loss; for (int idx = 0; idx < count; idx++) { PAddNode* ptr = (PAddNode*)batch[idx]; vec_loss.push_back(&ptr->loss); int n = ptr->ins.size(); vector<LDG::PTensor> vec_in_loss; for(int idy = 0; idy < n; idy++) { vec_in_loss.push_back(&ptr->ins[idy]->loss); } vec_vec_in_loss.push_back(vec_in_loss); //ptr->backward_drop(); //ptr->backward(); } if (drop_value > 0) { DEV->Ddropout(vec_loss, mask); } DEV->Dadd(vec_vec_in_loss, vec_loss); } }; inline PExecute PAddNode::generate(bool bTrain) { PAddExecute* exec = new PAddExecute(); exec->batch.push_back(this); exec->bTrain = bTrain; return exec; } //#endif #endif
2.h
// // Created by aleksey on 03.11.16. // #ifndef TASKSOPENMP_2A_H #define TASKSOPENMP_2A_H #include "Tree.h" #include <iostream> #include <omp.h> using namespace std; // 2a void node_process(struct node *tree) { // Подсчитываем число узлов num_nodes++; // Связываем с каждым узлом какую-то работу // Работа имеет разную вычислительную сложность для различных вершин work(tree->num); // Выводим номер узла, который обработали //cout << tree->num << endl; if (tree->left) node_process(tree->left); if (tree->right) node_process(tree->right); return; } void task_2a(node * tree){ cout << "2a) Последовательная обработка" << endl; clock_t start, finish; // переменные для измерения времени double time; start = clock(); node_process(tree); finish = clock(); time = (double) (finish - start) / CLOCKS_PER_SEC; cout << "Время последовательного варианта " << time << endl; cout << "Число вершин " << num_nodes << endl; } //2b void node_process_parallel(struct node *tree) { // Подсчитываем число узлов #pragma omp atomic num_nodes++; // Связываем с каждым узлом какую-то работу // Работа имеет разную вычислительную сложность для различных вершин work(tree->num); // Выводим номер узла, который обработали //cout << tree->num << " " << omp_get_thread_num() << endl; #pragma omp task if (tree->left) node_process_parallel(tree->left); #pragma omp task if (tree->right) node_process_parallel(tree->right); return; } void task_2b(node * tree) { cout << "2a) Последовательный и параллельный варианты" << endl; clock_t start, finish; // переменные для измерения времени double time1, time2; start = clock(); node_process(tree); finish = clock(); time1 = (double)(finish - start)/CLOCKS_PER_SEC; start = clock(); #pragma omp parallel { #pragma omp single { node_process_parallel(tree); } } finish = clock(); time2 = (double)(finish - start)/CLOCKS_PER_SEC; cout << "Время последовательного варианта " << time1 << endl; cout << "Время параллельного варианта " << time2 << endl; } // 2c void task_2c(node * tree) { 
cout << "2в) Параллельный вариант без 'pragma omp single'" << endl; clock_t start, finish; // переменные для измерения времени double time1, time2; start = clock(); node_process(tree); finish = clock(); time1 = (double)(finish - start)/CLOCKS_PER_SEC; start = clock(); #pragma omp parallel { { node_process_parallel(tree); } } finish = clock(); time2 = (double)(finish - start)/CLOCKS_PER_SEC; cout << "Время последовательного варианта " << time1 << endl; cout << "Время параллельного варианта " << time2 << endl; } // 2d void node_process_parallel_rising(struct node *tree) { // Подсчитываем число узлов #pragma omp atomic num_nodes++; // Выводим номер узла, который обработали //cout << tree->num << " " << omp_get_thread_num() << endl; #pragma omp task if (tree->left) node_process_parallel(tree->left); #pragma omp task if (tree->right) node_process_parallel(tree->right); // Связываем с каждым узлом какую-то работу // Работа имеет разную вычислительную сложность для различных вершин work(tree->num); return; } void task_2d(node * tree) { cout << "2г) Восходящий и нисходящий варианты" << endl; clock_t start, finish; // переменные для измерения времени double time1, time2; start = clock(); cout << "Нисходящая обработка" << endl; #pragma omp parallel { #pragma omp single { node_process_parallel(tree); } } finish = clock(); time1 = (double)(finish - start)/CLOCKS_PER_SEC; start = clock(); cout << "Восходящая обработка" << endl; #pragma omp parallel { #pragma omp single { node_process_parallel_rising(tree); } } finish = clock(); time2 = (double)(finish - start)/CLOCKS_PER_SEC; cout << "Время нисходящей параллельной обработки " << time1 << endl; cout << "Время восходящей параллельной обработки " << time2 << endl; } #endif //TASKSOPENMP_2A_H
costmap_creators.h
/* Copyright (c) 2013, Kai Klindworth All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef COSTMAP_CREATORS_H
#define COSTMAP_CREATORS_H

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "disparity_toolkit/genericfunctions.h"
#include "disparity_toolkit/disparity_range.h"

namespace costmap_creators
{

//! Allocates a 3D cost volume of shape (image rows, image cols, range),
//! every cell initialized to default_value. T is the per-cell cost type.
template<typename T>
cv::Mat_<T> create_cost_map(const cv::Mat& image, int range, T default_value)
{
	int sz[] = {image.size[0], image.size[1], range};
	return cv::Mat_<T>(3, sz, default_value);
}

//! Convenience overload: derives the disparity-axis extent from a disparity_range.
template<typename T>
cv::Mat_<T> create_cost_map(const cv::Mat& image, const disparity_range& range, T default_value)
{
	return create_cost_map<T>(image, range.size(), default_value);
}

//! Fills the disparity slots of cost_map at pixel (y,x) for every disparity d in
//! crange (inclusive of both ends) with func(y, x, d). Writes sequentially starting
//! at the slot crange.index(crange.start()), so T must match the map's element type.
template<typename T, typename lambda_func>
inline void transform_range(cv::Mat& cost_map, int y, int x, const disparity_range& crange, lambda_func func)
{
	T *result_ptr = cost_map.ptr<T>(y,x, crange.index(crange.start()));
	for(int d = crange.start(); d <= crange.end(); ++d)
	{
		*result_ptr++ = func(y, x, d);
	}
}

namespace sliding_window
{

//! Builds a cost volume for base/match using a fixed square window of side
//! windowsize. cost_class supplies the per-window cost policy: its result_type is
//! the cost cell type, thread_type its per-thread scratch state, and it is driven
//! via prepare_row / prepare_window / increm. Rows are processed in parallel
//! (one OpenMP iteration per row, private scratch state per thread).
//! Unvisited border cells keep the sentinel max()/3.
template<typename cost_class>
cv::Mat joint_fixed_size(const cv::Mat& base, const cv::Mat& match, const disparity_range range, const int windowsize)
{
	assert(windowsize > 0);

	using prob_table_type = typename cost_class::result_type;
	// max()/3 sentinel leaves headroom so later additions don't overflow.
	cv::Mat cost_map = create_cost_map<prob_table_type>(base, range, std::numeric_limits<prob_table_type>::max()/3);

	// Skip the half-window border on all four sides.
	const int border = windowsize/2;
	const int y_min = border;
	const int y_max = base.rows - border;
	const int x_min = border;
	const int x_max = base.cols - border;

	cost_class cost_agg(base, match, windowsize);
	typename cost_class::thread_type thread_data;

	#pragma omp parallel for private(thread_data)
	for(int y = y_min; y < y_max; ++y)
	{
		cost_agg.prepare_row(thread_data, y);
		cv::Mat windowBase = subwindow(base, x_min, y, windowsize);
		for(int x = x_min; x < x_max; ++x)
		{
			cost_agg.prepare_window(thread_data, windowBase);
			// Clamp the disparity range so x+d stays inside the image.
			const disparity_range crange = range.restrict_to_image(x, base.cols, border);

			transform_range<prob_table_type>(cost_map, y, x, crange, [&](int, int x, int d){
				return cost_agg.increm(thread_data, x+d);
			});

			// Slide the window one column to the right for the next x.
			windowBase.adjustROI(0,0,-1,1);
		}
	}

	return cost_map;
}

//! Like joint_fixed_size, but the window dimensions vary per pixel:
//! windowsizes(y,x) = (height, width). Pixels whose stored window has a zero
//! dimension are skipped and keep the sentinel cost 8.
//! NOTE(review): this calls cost_agg.prepareWindow while the sibling functions
//! call prepare_window — confirm the cost_class policies used here really expose
//! that camelCase name.
template<typename cost_class>
cv::Mat joint_flexible_size(const cv::Mat& base, const cv::Mat& match, const disparity_range range, const cv::Mat_<cv::Vec2b>& windowsizes)
{
	int min_windowsize = 7;

	typedef float prob_table_type;
	cv::Mat cost_map = create_cost_map<prob_table_type>(base, range, 8);

	// Border derived from the minimum window size, not the per-pixel one.
	const int y_min = min_windowsize/2;
	const int y_max = base.rows - min_windowsize/2;
	const int x_min = min_windowsize/2;
	const int x_max = base.cols - min_windowsize/2;

	cost_class cost_agg(base, match, range);
	typename cost_class::thread_type thread_data;

	#pragma omp parallel for private(thread_data)
	for(int y = y_min; y < y_max; ++y)
	{
		cost_agg.prepare_row(thread_data, y);
		for(int x = x_min; x < x_max; ++x)
		{
			const cv::Vec2b cwindowsize = windowsizes(y,x);
			if(cwindowsize[0] > 0 && cwindowsize[1] > 0)
			{
				cv::Mat windowBase = subwindow(base, x, y, cwindowsize[1], cwindowsize[0] );
				cost_agg.prepareWindow(thread_data, windowBase, cwindowsize[1], cwindowsize[0] );
				const disparity_range crange = range.restrict_to_image(x, base.cols, cwindowsize[1]/2);

				transform_range<prob_table_type>(cost_map, y, x, crange, [&](int, int x, int d) {
					return cost_agg.increm(thread_data, x, d);
				});
			}
		}
	}

	return cost_map;
}

// Computes the disparity for subranges: range_bound gives the total allowed range,
// rangeCenter(y,x) -/+ disparity_delta the per-pixel subrange.
// The cost volume's disparity axis therefore has only disparity_delta*2+1 slots.
template<typename cost_class>
cv::Mat flexible_size_flexible_disparityrange(const cv::Mat& base, const cv::Mat& match, const cv::Mat& windowsizes, const cv::Mat& rangeCenter, int disparity_delta, const disparity_range range_bound, unsigned int min_windowsize, unsigned int max_windowsize)
{
	typedef float prob_table_type;
	cv::Mat cost_map = create_cost_map<prob_table_type>(base, disparity_delta*2+1, 8);

	const int y_min = min_windowsize/2;
	const int y_max = base.rows - min_windowsize/2;
	const int x_min = min_windowsize/2;
	const int x_max = base.cols - min_windowsize/2;

	cost_class cost_agg(base, match, range_bound, max_windowsize);
	typename cost_class::thread_type thread_data;

	#pragma omp parallel for private(thread_data)
	for(int y = y_min; y < y_max; ++y)
	{
		cost_agg.prepare_row(thread_data, y);
		for(int x = x_min; x < x_max; ++x)
		{
			cv::Vec2b cwindowsize = windowsizes.at<cv::Vec2b>(y,x);
			// Per-pixel subrange centered on rangeCenter, clamped to the image.
			const disparity_range crange = range_bound.subrange_with_subspace(rangeCenter.at<short>(y,x), disparity_delta).restrict_to_image(x, base.cols, cwindowsize[1]/2);
			if(cwindowsize[0] > 0 && cwindowsize[1] > 0 && crange.end() > crange.start())
			{
				cost_agg.prepare_window(thread_data, x, cwindowsize[1], cwindowsize[0] );

				transform_range<prob_table_type>(cost_map, y, x, crange, [&](int, int x, int d) {
					return cost_agg.increm(thread_data, x, d);
				});
			}
		}
	}

	return cost_map;
}

}

//! Calculates pointwise (no window involved) the disparity. cost aggregator must
//! be passed as a function object (called as cost_agg(y, x, d)), returns a cost map.
//! Cells outside the clamped per-pixel range keep the sentinel float max().
template<typename cost_class, typename data_type>
cv::Mat calculate_pixelwise(cv::Mat base, data_type data, const disparity_range range)
{
	cv::Mat result = create_cost_map<float>(base, range, std::numeric_limits<float>::max());

	cost_class cost_agg(base, data, range.start());

	#pragma omp parallel for
	for(int y = 0; y< base.size[0]; ++y)
	{
		for(int x = 0; x < base.size[1]; ++x)
		{
			const disparity_range crange = range.restrict_to_image(x, base.size[1]);

			transform_range<float>(result, y, x, crange, [&](int y, int x, int d) {
				return cost_agg(y, x, d);
			});
		}
	}

	return result;
}

/*template<typename data_type>
cv::Mat flexBoxFilter(cv::Mat src, cv::Mat windowsizes)
{
	cv::Mat cost_map(src.size(), CV_32FC1, cv::Scalar(8));

	#pragma omp parallel for default(none) shared(src, cost_map, windowsizes)
	for(int y = 0; y < src.rows; ++y)
	{
		for(int x = 0; x < src.cols; ++x)
		{
			cv::Vec2b cwindowsize = windowsizes.at<cv::Vec2b>(y,x);
			if(cwindowsize[0] > 0 && cwindowsize[1] > 0)
			{
				cv::Mat windowBase = subwindow(src, x, y, cwindowsize[1], cwindowsize[0] );
				cost_map.at<float>(y,x) = cv::sum(windowBase)/windowBase.total();
			}
		}
	}
	return cost_map;
}*/

}

//! Builds a cost volume one disparity slice at a time: cost_func(d) yields a
//! per-pixel cost image for disparity d, window_sum aggregates it, and the
//! aggregated slice is scattered into the 3D result. Parallelized over d, so
//! each OpenMP iteration writes a disjoint slice. Pixels where x+d would leave
//! the image keep the sentinel float max().
template<typename cost_type, typename window_type>
cv::Mat disparitywise_calculator(cost_type cost_func, window_type window_sum, cv::Size base_size, const disparity_range range)
{
	int sz[] = {base_size.height, base_size.width, range.size()};
	cv::Mat_<float> result = cv::Mat(3, sz, CV_32FC1, cv::Scalar(std::numeric_limits<float>::max()));
	//cv::Mat_<float> result = costmap_creators::sliding_window::create_cost_map(base)

	#pragma omp parallel for
	for(int d = range.start(); d <= range.end(); ++d)
	{
		cv::Mat temp_result = window_sum(cost_func(d), d);

		for(int y = 0; y < base_size.height; ++y)
		{
			if(d < 0)
			{
				// Negative disparity: only x >= -d maps inside temp_result.
				for(int x = -d; x < base_size.width; ++x)
					result(y,x,range.index(d)) = temp_result.at<float>(y, x+d);
			}
			else
			{
				// Positive disparity: valid up to width - d.
				int max_x = base_size.width - d;
				for(int x = 0; x < max_x; ++x)
					result(y,x,range.index(d)) = temp_result.at<float>(y, x);
			}
		}
	}
	return result;
}

//! disparitywise_calculator specialization that aggregates each disparity slice
//! with a plain box filter of window_size.
template<typename cost_type>
cv::Mat simple_window_disparitywise_calculator(cost_type cost_func, cv::Size window_size, cv::Size base_size, const disparity_range range)
{
	auto window_sum = [=](const cv::Mat& pre_result, int){
		cv::Mat temp_result;
		cv::boxFilter(pre_result, temp_result, -1, window_size);
		return temp_result;
	};

	return disparitywise_calculator(cost_func, window_sum, base_size, range);
}

#endif // COSTMAP_CREATORS_H
misc_avx2.c
//sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>

#define N_RUNS 1000
#define N 120000

// read timer in second
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

//Create a matrix and a vector and fill with random numbers
// Each element is a pseudo-random integer in [0, 10].
void init(int *X) {
    for (int i = 0; i < N; i++) {
        X[i] = (int) rand() / (int) (RAND_MAX / 10.0);
    }
}

//Our sum function- what it does is pretty straight-forward.
// Computes answer[i] = X[i] + 2*Y[i] over all N elements with an
// 8-lane SIMD hint. The int return value is unused (kept for API parity
// with sum_serial).
int sum(int *X, int *Y, int *answer) {
    int result = 0;
#pragma omp simd simdlen(8)
    for (int i = 0; i < N; i++) {
        answer[i] = X[i] + Y[i] * 2;
    }
    return result;
}

// Debug functions
// Scalar reference implementation of sum(), used for correctness checking.
int sum_serial(int *X, int *Y, int *answer) {
    int result = 0;
    for (int i = 0; i < N; i++) {
        answer[i] = X[i] + Y[i] * 2;
    }
    return result;
}

// Prints the first 8 elements of a vector (enough to eyeball the result).
void print_vector(int *vector) {
    printf("[");
    for (int i = 0; i < 8; i++) {
        printf("%d ", vector[i]);
    }
    puts("]");
}

// Returns 0 iff both result vectors are identical.
// Uses absolute differences so opposite-signed errors cannot cancel out.
int check(int *serial, int *SIMD) {
    int diff = 0;
    for (int i = 0; i < N; i++)
        diff += abs(serial[i] - SIMD[i]);
    return diff;
}

int main(int argc, char **argv) {
    //Set everything up
    int *X = (int *) malloc(sizeof(int) * N);
    int *Y = (int *) malloc(sizeof(int) * N);
    int *answer = (int *) malloc(sizeof(int) * N);
    int *answer_serial = (int *) malloc(sizeof(int) * N);
    if (X == NULL || Y == NULL || answer == NULL || answer_serial == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    srand(time(NULL));
    init(X);
    init(Y);

    double start = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        sum(X, Y, answer);
    double t = (read_timer() - start);

    double start_serial = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        sum_serial(X, Y, answer_serial);
    double t_serial = (read_timer() - start_serial);

    printf("X: ");
    print_vector(X);
    puts("+");
    printf("Y: ");
    print_vector(Y);
    puts("=\n");
    printf("SIMD:\n");
    print_vector(answer);
    puts("---------------------------------");
    printf("Serial:\n");
    print_vector(answer_serial);

    // The kernel performs 2 ops (one add, one multiply) per element, per run:
    // total flops = 2 * N * N_RUNS. (The previous formula multiplied by N
    // twice, overstating GFLOPS by a factor of N.)
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness:\t\t%d\n", check(answer_serial, answer));

    free(X);
    free(Y);
    free(answer);
    free(answer_serial);
    return 0;
}
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/
/* Serializes one-time cache component setup/teardown. */
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

/* -1 = "not yet determined from policy/environment". */
static ssize_t
  cache_anonymous_memory = (-1);

static time_t
  cache_epoch = 0;

/*
  AcquirePixelCache() acquires a pixel cache.

  The format of the AcquirePixelCache() method is:

      Cache AcquirePixelCache(const size_t number_threads)

  A description of each parameter follows:

    o number_threads: the number of nexus threads.
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireCriticalMemory(sizeof(*cache_info));
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    Raise the thread count to cover the OpenMP maximum and the thread
    resource limit, so every possible worker gets its own nexus.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    The policy setting, checked second, takes precedence over the
    environment variable.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
  AcquirePixelCacheNexus() allocates the NexusInfo structure.

  The format of the AcquirePixelCacheNexus method is:

      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)

  A description of each parameter follows:

    o number_threads: the number of nexus threads.
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    One pointer per thread; the NexusInfo structs themselves live in a
    single contiguous allocation anchored at nexus_info[0].
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
    sizeof(**nexus_info));
  if (nexus_info[0] == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(nexus_info[0],0,number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    nexus_info[i]=(&nexus_info[0][i]);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}

/*
  AcquirePixelCachePixels() returns the pixels associated with the specified
  image.
The format of the AcquirePixelCachePixels() method is:

      void *AcquirePixelCachePixels(const Image *image,size_t *length,
        ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o length: the pixel cache length.

    o exception: return any errors or warnings in this structure.
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  /*
    Only memory- and map-backed caches expose a directly addressable pixel
    buffer; all other cache types return NULL with *length == 0.
  */
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  *length=(size_t) cache_info->length;
  return(cache_info->pixels);
}

/*
  CacheComponentGenesis() instantiates the cache component.

  The format of the CacheComponentGenesis method is:

      MagickBooleanType CacheComponentGenesis(void)
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}

/*
  CacheComponentTerminus() destroys the cache component.
The format of the CacheComponentTerminus() method is:

      CacheComponentTerminus(void)
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /* Re-acquire the semaphore if it was never created before releasing it. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}

/*
  ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
  mask.  The method returns MagickTrue if the pixel region is clipped,
  otherwise MagickFalse.

  The format of the ClipPixelCacheNexus() method is:

      MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
        ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o nexus_info: the cache nexus to clip.

    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  NexusInfo
    **magick_restrict image_nexus;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to do unless the image carries a write mask channel. */
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic (on-cache) pixels via a temporary nexus; q walks
    the caller's nexus pixels being clipped in place.
  */
  image_nexus=AcquirePixelCacheNexus(1);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,image_nexus[0],
    exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    if (p == (Quantum *) NULL)
      break;
    mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /* Blend each updatable channel over the cached pixel by mask alpha. */
        for (i=0; i < (ssize_t) image->number_channels; i++)
        {
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
            GetPixelAlpha(image,p),(double) q[i],(double)
            GetPixelAlpha(image,q)));
        }
        SetPixelAlpha(image,GetPixelAlpha(image,p),q);
      }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  image_nexus=DestroyPixelCacheNexus(image_nexus,1);
  /* n < number_pixels means the loop bailed out on a NULL pixel pointer. */
  if (n < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}

/*
  ClonePixelCache() clones a pixel cache.

  The format of the ClonePixelCache() method is:

      Cache ClonePixelCache(const Cache cache)

  A description of each parameter follows:

    o cache: the pixel cache.
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info;

  const CacheInfo
    *magick_restrict cache_info;

  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Acquires a fresh cache with the same thread count and copies only the
    virtual pixel method; pixel data is not cloned here.
  */
  clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
  clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
  return((Cache ) clone_info);
}

/*
  ClonePixelCacheMethods() clones the pixel cache methods from one cache to
  another.

  The format of the ClonePixelCacheMethods() method is:

      void ClonePixelCacheMethods(Cache clone,const Cache cache)

  A description of each parameter follows:

    o clone: Specifies a pointer to a Cache structure.

    o cache: the pixel cache.
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  assert(clone != (Cache) NULL);
  /* Note: the destination ("clone") is held in source_info here. */
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Copy the method table from cache into clone. */
  source_info->methods=cache_info->methods;
}

/*
  ClonePixelCacheRepository() clones the source pixel cache to the destination
  cache.

  The format of the ClonePixelCacheRepository() method is:

      MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
        CacheInfo *source_info,ExceptionInfo *exception)

  A description of each parameter follows:

    o cache_info: the pixel cache.

    o source_info: the source pixel cache.

    o exception: return any errors or warnings in this structure.
*/
/*
  Byte-for-byte copy of one disk-backed cache file to another; both caches
  must have identical morphology.  Returns MagickFalse on any short read,
  short write, or open/seek failure.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /* Copy in chunks no larger than the file itself (or the buffer cap). */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* The whole cache must have been transferred. */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads  ((size_t) GetMagickResourceLimit(ThreadResource))
/*
  Thread-count clause for the OpenMP pragmas below: single-threaded when
  multithreaded == 0; at most 2 threads when either side is disk-backed
  (I/O bound); otherwise scaled by the chunk size.
*/
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /* Both sides addressable: one memcpy for pixels, one for metadata. */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through nexus
    buffers, remapping channels when the channel maps differ.
  */
  cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    RectangleInfo
      region;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /* Read one source row into this thread's nexus... */
    region.width=cache_info->columns;
    region.height=1;
    region.x=0;
    region.y=y;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickFalse,
      cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* ...then stage the matching destination row. */
    region.width=clone_info->columns;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickFalse,
      clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: copy channel by channel, looking up
          each destination channel's offset in the source map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        RectangleInfo
          region;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        region.width=cache_info->columns;
        region.height=1;
        region.x=0;
        region.y=y;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickFalse,
          cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        region.width=clone_info->columns;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
          MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
  DestroyImagePixelCache() deallocates memory associated with the pixel cache.

  The format of the DestroyImagePixelCache() method is:

      void DestroyImagePixelCache(Image *image)

  A description of each parameter follows:

    o image: the image.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}

/*
  DestroyImagePixels() deallocates memory associated with the pixel cache.

  The format of the DestroyImagePixels() method is:

      void DestroyImagePixels(Image *image)

  A description of each parameter follows:

    o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to an installed destroy handler if one is registered; otherwise
    release the cache directly.
  */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyPixelCache() method is:
%
%      Cache DestroyPixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/

/*
  Close the backing disk file (if open) and release the file resource slot.
  Returns MagickFalse only when close() itself fails.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

/*
  Release the pixel storage of a cache according to its type (memory, mapped
  file, disk file, or distributed server) and reset the cache to an undefined
  state.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      /* keep read-only/persistent cache files on disk for reuse */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
      /*
        NOTE(review): no break here — control falls through to DiskCache,
        which closes the file descriptor backing the map; presumably
        intentional, confirm against upstream.
      */
    }
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}

MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the semaphore; only the last owner proceeds to
    tear the cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Release pixel storage first, then the auxiliary structures, and finally
    the semaphores and the CacheInfo itself.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* invert the signature so stale pointers are caught by later asserts */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
% */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) number_threads; i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1)) { SyncImagePixelCache((Image *) image,exception); cache_info=(CacheInfo *) image->cache; } if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. 
% % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. 
% % The format of the GetAuthenticPixelsFromCache() method is: % % Quantum *GetAuthenticPixelsFromCache(const Image image) % % A description of each parameter follows: % % o image: the image. % */ static Quantum *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels associated % corresponding with the last call to QueueAuthenticPixels() or % GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % Quantum *GetAuthenticPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport Quantum *GetAuthenticPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a Quantum array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image has corresponding metacontent,call % GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the % meta-content corresponding to the region. Once the Quantum array has % been updated, the changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. 
% % The format of the GetAuthenticPixels() method is: % % Quantum *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
      (image->metacontent_extent != cache_info->metacontent_extent) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Process-wide throttle/time-limit state, initialized lazily on first use.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* throttle the CPU: sleep once every 32 calls when a limit is set */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=time((time_t *) NULL);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      /* time budget exhausted: close any open cache file and abort */
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: a shared or read-only cache must be cloned before callers
    may modify it.  The condition is re-checked after taking the cache
    semaphore (double-checked locking).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) ||
          (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* copy the pixel data only when the caller requested it */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* swap in the clone; the original is released below */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MemoryCache, MapCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
% % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { register ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. 
The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. 
% % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, GetPixelCacheVirtualMethod(image),x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e V i r t u a l P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelFromCache() returns a single virtual pixel at the % specified (x,y) location. The image background color is returned if an % error occurs. % % The format of the GetOneVirtualPixelFromCache() method is: % % MagickBooleanType GetOneVirtualPixelFromCache(const Image image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. 
% % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixelInfo() method is: % % MagickBooleanType GetOneVirtualPixelInfo(const Image image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelInfo *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Dropped the obsolete `register' storage class: it is ignored by modern
    compilers, removed in C++17, and inconsistent with the neighboring
    one-pixel getters in this file.
  */
  const Quantum
    *magick_restrict p;

  /*
    Sanity-check the image and its pixel cache.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Initialize the PixelInfo result, then fetch a 1x1 region through this
    thread's nexus; on failure the initialized (background) pixel stands.
  */
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the class type of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      Colorspace GetPixelCacheColorspace(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Reset the method table, then install the default cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel access.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  /*
    Authentic (read/write) pixel access.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  /*
    Queueing, synchronization, and teardown.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  corresponding with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict info;

  MagickSizeType
    area;

  assert(cache != NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  /*
    A non-empty nexus region defines the extent; an empty one falls back to
    the whole-image extent.
  */
  area=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (area != 0)
    return(area);
  return((MagickSizeType) info->columns*info->rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified
%  image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  const CacheInfo
    *magick_restrict info;

  size_t
    bytes_per_pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Square tiles: a disk-backed cache gets a larger (8KB-row) tile than an
    in-memory cache (2KB row) to amortize I/O.
  */
  bytes_per_pixel=info->number_channels*sizeof(Quantum);
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/bytes_per_pixel;
  else
    *width=2048UL/bytes_per_pixel;
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. 
%
%  The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  /*
    An uninitialized cache has no meta-content to hand out.
  */
  if (info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content are not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict content;

  /*
    Sanity-check the image and its pixel cache.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Ask the installed handler first; if it yields nothing, fall back to this
    thread's nexus.
  */
  content=info->methods.get_virtual_metacontent_from_handler(image);
  if (content != (void *) NULL)
    return(content);
  assert(id < (int) info->number_threads);
  return(GetVirtualMetacontentFromNexus(info,info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetVirtualPixelCacheNexus() method is:
%
%      Quantum *GetVirtualPixelCacheNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  8x8 ordered-dither offsets used by the Dither virtual-pixel method to
  perturb out-of-bounds coordinates before clamping.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

/* Dither the x coordinate, then clamp it to [0,columns-1]. */
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

/* Dither the y coordinate, then clamp it to [0,rows-1]. */
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

/* Clamp the x coordinate to [0,columns-1] (replicate the edge pixel). */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/* Clamp the y coordinate to [0,rows-1] (replicate the edge pixel). */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/* Pick a uniformly random in-bounds x coordinate. */
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

/* Pick a uniformly random in-bounds y coordinate. */
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Compute the remainder of dividing offset by extent.  It returns not only
    the quotient (tile the offset falls in) but also the positive remainder
    within that tile such that 0 <= remainder < extent.  This method is
    essentially a ldiv() using a floored modulo division rather than the
    normal default truncated modulo division.
  */
  modulo.quotient=offset/(ssize_t) extent;
  if (offset < 0L)
    modulo.quotient--;
  modulo.remainder=(ssize_t) (offset-(MagickOffsetType) modulo.quotient*
    (MagickOffsetType) extent);
  return(modulo);
}

MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    **magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  RectangleInfo
    region;

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Map the requested region onto a nexus; masked images need their own
    buffer so the mask can be applied without disturbing authentic pixels.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);  /* nexus points directly at authentic pixels */
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  s=(unsigned char *) nexus_info->metacontent;
  virtual_nexus=AcquirePixelCacheNexus(1);
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      /*
        Pre-compute the constant pixel these methods substitute for any
        out-of-bounds access.
      */
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /* Background and the tile/edge family use the background color. */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Walk the requested region row by row, copying in-bounds runs directly
    and resolving out-of-bounds pixels one at a time per the virtual-pixel
    method (recursing on a scratch nexus for single in-bounds lookups).
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,
        columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Random info is created lazily on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tiles are reflected in each axis. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant-pixel methods use the precomputed substitute. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) ||
                  (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) length*cache_info->number_channels*
            sizeof(*p));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,*virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
      (void) memcpy(q,p,(size_t) length*cache_info->number_channels*
        sizeof(*p));
      q+=length*cache_info->number_channels;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            NOTE(review): only `length' bytes are copied here, yet `s' is
            advanced by length*metacontent_extent (the single-pixel path
            above copies metacontent_extent bytes per pixel) -- confirm the
            copy size is intentional.
          */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;  /* a lookup failed mid-row; propagate the failure */
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
%  cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const Quantum *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  /*
    Thin adapter: route the region read through this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(id < (int) info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sanity-check the image, prefer an installed handler, otherwise return
    this thread's nexus pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_virtual_pixels_handler !=
      (GetVirtualPixelsHandler) NULL)
    return(info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) info->number_threads);
  return(GetVirtualPixelsNexus(info,info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the region is
%  successfully accessed, a pointer to it is returned, otherwise NULL is
%  returned.  The returned pointer may point to a temporary working copy of
%  the pixels or it may point to the original pixels in memory.  Performance
%  is maximized if the selected region is part of one row, or one or more
%  full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.
%  The returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not
%  thread-
In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. % % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated corresponding with the % last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the image mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. 
%
%  The format of the MaskPixelCacheNexus() method is:
%
%      MagickBooleanType MaskPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Blend pixel p over q weighted by the composite-mask value `alpha' and the
  destination alpha `beta'.  A fully opaque mask returns p unchanged.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    mask_alpha;

  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
  mask_alpha=PerceptibleReciprocal(mask_alpha);  /* guard divide-by-zero */
  return(ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,
    beta)));
}

static MagickBooleanType MaskPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  NexusInfo
    **magick_restrict image_nexus;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);  /* no composite mask: nothing to do */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic pixels of the nexus region fetched through a
    scratch nexus; q walks the just-written nexus pixels, which are blended
    back over the authentic ones under the mask.
  */
  image_nexus=AcquirePixelCacheNexus(1);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
    nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
    image_nexus[0],exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    if (p == (Quantum *) NULL)
      break;  /* authentic pixels unavailable; failure reported below */
    mask_alpha=(double) GetPixelCompositeMask(image,p);
    for (i=0; i < (ssize_t) image->number_channels; i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      if ((traits & UpdatePixelTrait) == 0)
        continue;  /* only blend channels marked for update */
      q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],
        (MagickRealType) GetPixelAlpha(image,q));
    }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  image_nexus=DestroyPixelCacheNexus(image_nexus,1);
  if (n < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   O p e n P i x e l C a c h e                                           %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenPixelCache() allocates the pixel cache.  This includes defining the
%  cache dimensions, allocating space for the image pixels and optionally
%  the metacontent, and memory mapping the cache if it is disk based.  The
%  cache nexus array is initialized as well.
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Open (or create) the disk file backing the pixel cache and record the
  descriptor and mode in cache_info.  Returns MagickTrue on success.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /*
          Try exclusive create first; fall back to opening an existing file.
        */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,
            S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);  /* drop stale descriptor */
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

/*
  Write `length' bytes from buffer to the cache file at `offset', retrying
  on EINTR and on short writes.  Returns the number of bytes written, or -1
  on a seek failure (non-pwrite builds only).
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (size_t) SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (size_t) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard I/O error: return bytes written so far */
      }
  }
  return(i);
}

/*
  Extend the disk cache file to `length' bytes (sparse where supported).
*/
static MagickBooleanType SetPixelCacheExtent(Image
*image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,
        format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Reject lengths that cannot be represented as a file offset.
  */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already large enough */
  else
    {
      /*
        Extend by writing a single byte at the last position; the hole in
        between stays sparse unless posix_fallocate() is requested.
      */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,
        (const unsigned char *) "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        (void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      /* NOTE(review): value may be NULL here; presumably LocaleCompare()
         and DestroyString() tolerate NULL — confirm in MagickCore. */
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",
            image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",
      image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
      (AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /*
    Remember the previous cache (by value) so its pixels can be cloned into
    the new cache and then released.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,
    "%s[%.20g]",image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /*
    Re-derive columns from the length; a mismatch signals overflow in the
    size computation above.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) ||
      ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /*
        Ping: no pixel storage is needed.
      */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always go to disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            cache_info->pixels=source_info.pixels;  /* fall through to disk */
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,
                "UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,
                    "B",MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double)
                    cache_info->columns,(double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      /*
        Distributed cache failed and disk resources are exhausted.
      */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;  /* too large to memory-map */
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) &&
            (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                /*
                  Mapping failed: fall back to plain disk cache.
                */
                cache_info->type=DiskCache;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    number_pixels*cache_info->number_channels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,
                      &source_info,exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,
                      "B",MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,
                      (ssize_t) cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                return(status == 0 ? MagickFalse : MagickTrue);
              }
          }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   P e r s i s t P i x e l C a c h e                                     %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.
%  A persistent pixel cache is one that resides on disk and is not
%  destroyed when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,
%        const char *filename,const MagickBooleanType attach,
%        MagickOffsetType *offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: a value other than zero attaches to (rather than
%      initializes) the persistent pixel cache.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,
  MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance offset to the next page boundary past this cache */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,
    MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s             %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
QueueAuthenticPixelCacheNexus() allocates a region to store image pixels
%  as defined by the region rectangle and returns a pointer to the region.
%  This region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* offset arithmetic overflowed */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* offset of the last pixel in the requested region */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);  /* region extends past the cache */
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue :
    MagickFalse,nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   Q u e u e A u t h e n t i c P i x e l s C a c h e                     %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.
%  This region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a write-only region through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
%   Q u e u e A u t h e n t i c P i x e l s                               %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer
%  may point to a temporary working buffer for the pixels or it may point
%  to the final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does
%  not initialize the pixel array values.  Initializing pixel array values
%  is the application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of
%  type Quantum.  If the image type is CMYK or the storage class is
%  PseudoClass, call GetAuthenticMetacontent() after invoking
%  GetAuthenticPixels() to obtain the meta-content (of type void)
%  corresponding to the region.  Once the Quantum (and/or metacontent)
%  array has been updated, the changes must be saved back to the underlying
%  image using SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  QueueAuthenticPixelsHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Validate the image and locate its pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    A registered queue handler overrides the default per-thread nexus path.
  */
  handler=cache_info->methods.queue_authentic_pixels_handler;
  if (handler != (QueueAuthenticPixelsHandler) NULL)
    return(handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   R e a d P i x e l C a c h e M e t a c o n t e n t                     %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheMetacontent() reads metacontent from the specified region
%  of the pixel cache.
%
%  The format of the ReadPixelCacheMetacontent() method is:
%
%      MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the metacontent.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ReadPixelCacheRegion() reads 'length' bytes starting at byte 'offset' of
  the pixel-cache file into 'buffer'.  Short reads are retried (EINTR is
  ignored) until the full length is transferred or a hard error/EOF occurs.
  Returns the number of bytes actually read, or -1 if the seek fails.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* no pread(): position the shared file offset explicitly */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    /* clamp each request to SSIZE_MAX so the syscall size stays valid */
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    /* pread() does not move the file offset, so it is safe under threads */
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error or EOF; caller observes a short count */
      }
  }
  return(i);
}

/*
  ReadPixelCacheMetacontent() copies the metacontent bytes covered by
  nexus_info->region from the backing store (memory, disk, or distributed
  cache) into nexus_info->metacontent.  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char *magick_restrict
    q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);  /* this cache carries no metacontent */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  /* offset: first pixel of the region, in pixels from the cache origin */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  /* length: metacontent bytes per region row; extent: bytes for all rows */
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char *magick_restrict
        p;

      /*
        Read meta-content from memory.
      */
      /* full-width regions that fit size_t collapse to one big memcpy */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* repurpose extent: total pixels in the cache -- the on-disk layout
         stores all pixel data first, metacontent after it */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);  /* shed fd pressure */
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* request one row at a time */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* any early break above leaves y < rows and is reported as a failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d   P i x e l   C a c h e   P i x e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCachePixels() copies the pixel data covered by nexus_info->region
  from the backing store (memory, disk, or distributed cache) into
  nexus_info->pixels.  Returns MagickTrue on success; MagickFalse on
  arithmetic overflow of the region extents or an I/O failure.
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum *magick_restrict
    q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* division round-trip detects overflow in the multiplication above */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  /* length: bytes per region row */
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);  /* overflow guard */
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);  /* empty region or overflow */
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum *magick_restrict
        p;

      /*
        Read pixels from memory.
      */
      /* full-width regions that fit size_t collapse to a single memcpy */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);  /* shed fd pressure */
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* request one row at a time */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* any early break above leaves y < rows and is reported as a failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e   P i x e l   C a c h e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* the semaphore guards the shared reference count */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   P i x e l   C a c h e   C h a n n e l s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t C a c h e A n o n y m o u s M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetCacheAnonymousMemory() resets the anonymous_memory value. % % The format of the ResetCacheAnonymousMemory method is: % % void ResetCacheAnonymousMemory(void) % */ MagickPrivate void ResetCacheAnonymousMemory(void) { cache_anonymous_memory=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Each handler in cache_methods is copied into
    the cache only when it is non-NULL, so callers may override a subset.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): unlike the handlers above, the guard below tests the
    handler currently installed in cache_info, not the one supplied in
    cache_methods -- so a NULL cache_methods handler could be copied over a
    non-NULL installed one.  This asymmetry is preserved here; confirm intent
    before changing.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  /* the authentic variant follows the usual pattern: copy when non-NULL */
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t   P i x e l   C a c h e   N e x u s   P i x e l s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(const CacheInfo *cache_info,
%        const MapMode mode,const RectangleInfo *region,
%        const MagickBooleanType buffered,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o region: A pointer to the RectangleInfo structure that defines the
%      region of this particular cache nexus.
%
%    o buffered: if true, nexus pixels are buffered.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  AcquireCacheNexusPixels() allocates nexus_info->length bytes for the nexus
  staging buffer, either as anonymous heap memory or as an anonymous memory
  map, depending on the cache_anonymous_memory setting.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /* reject lengths that cannot be represented as size_t on this platform */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);
  if (cache_anonymous_memory <= 0)
    {
      nexus_info->mapped=MagickFalse;
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) nexus_info->length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) nexus_info->length);
    }
  else
    {
      /* -1 file descriptor requests an anonymous mapping */
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
  IsPixelCacheAuthentic() reports whether the nexus pixel pointer aliases the
  in-core cache pixels directly (authentic) rather than a staging buffer.
*/
static inline MagickBooleanType IsPixelCacheAuthentic(
  const CacheInfo *magick_restrict cache_info,
  const NexusInfo *magick_restrict nexus_info)
{
  MagickBooleanType
    status;

  MagickOffsetType
    offset;

  /*
    Does nexus pixels point directly to in-core cache pixels or is it buffered?
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  /* pointer-identity test against where the region starts in the cache */
  status=nexus_info->pixels == (cache_info->pixels+offset*
    cache_info->number_channels) ? MagickTrue : MagickFalse;
  return(status);
}

/*
  PrefetchPixelCacheNexusPixels() hints the CPU to prefetch the nexus pixels
  for reading (ReadMode) or writing (any other mode).
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}

/*
  SetPixelCacheNexusPixels() points the nexus at its region of the cache:
  either directly into the in-core pixels (when the region is row-contiguous
  and unbuffered) or at a staging buffer sized for pixels plus metacontent.
  Returns the nexus pixel pointer, or NULL on failure.
*/
static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,
  const MagickBooleanType buffered,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  if ((region->width == 0) || (region->height == 0))
    return((Quantum *) NULL);
  nexus_info->region=(*region);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  if (number_pixels == 0)
    return((Quantum *) NULL);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      ssize_t
        x,
        y;

      /* x,y: inclusive lower-right corner of the requested region */
      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /* direct access only for in-bounds regions that are contiguous in the
         cache: either full-width rows, or a single partial row */
      if (((nexus_info->region.x >= 0) && (nexus_info->region.y >= 0) &&
           (y < (ssize_t) cache_info->rows)) &&
          (((nexus_info->region.x == 0) &&
            (nexus_info->region.width == cache_info->columns)) ||
           ((nexus_info->region.height == 1) &&
            (x < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  /* staging buffer holds the region's pixels followed by its metacontent */
  length=number_pixels*cache_info->number_channels*sizeof(Quantum);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  if (nexus_info->cache == (Quantum *) NULL)
    {
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((Quantum *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer is too small: reallocate */
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((Quantum *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+number_pixels*
      cache_info->number_channels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   P i x e l   C a c h e   V i r t u a l   M e t h o d               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.
%  A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SetCacheAlphaChannel() sets every pixel's alpha channel to 'alpha' and
  marks the image as blending (alpha_trait=BlendPixelTrait).  Rows are
  processed in parallel when OpenMP is available.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a prior row failure short-circuits the remaining iterations */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* remember the previous method; it is the return value */
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* an alpha-bearing background requires the image to carry alpha */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* a non-gray background cannot live in a gray colorspace */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   O p e n C L   B u f f e r                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
%  been completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
/*
  CopyOpenCLBuffer() synchronizes the OpenCL device buffer backing a
  memory-type cache into host memory.  No-op for non-memory caches or when
  no OpenCL cache info is attached.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e   N e x u s           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* apply write/composite masks before the pixels reach the cache */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* nexus writes went straight into the cache; just mark it dirty */
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
%  or disk cache.  The method returns MagickTrue if the pixel region is synced,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Flush this thread's private nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c   A u t h e n t i c   P i x e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* an installed handler overrides the default per-thread nexus sync */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   I m a g e   P i x e l   C a c h e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* GetImagePixelCache() with clone=MagickTrue performs the actual sync */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   P i x e l   C a c h e   M e t a c o n t e n t                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified region
%  of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char *magick_restrict
    p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);  /* this cache carries no metacontent */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  /* offset: first pixel of the region, in pixels from the cache origin */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  /* length: metacontent bytes per region row; extent: bytes for all rows */
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char *magick_restrict
        q;

      /*
        Write associated pixels to memory.
      */
      /* full-width regions that fit size_t collapse to one big memcpy */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* repurpose extent: total pixels in the cache -- on disk, all pixel
         data precedes the metacontent */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;  /* short write; reported below via y < rows */
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);  /* shed fd pressure */
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* send one row at a time */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* any early break above leaves y < rows and is reported as a failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   C a c h e   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* The nexus aliases the cache's own storage; data is already in place. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    offset: cell index of the region's upper-left corner inside the cache;
    length: bytes of pixel data per region row; extent: whole region.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: collapse the row loop into a single copy */
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;  /* short write; reported after the switch */
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* one row per server request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y < rows means one of the row loops above exited early on a failed write */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double)
      nexus_info->region.y);
  return(MagickTrue);
}
hypre_hopscotch_hash.c
#include "hypre_hopscotch_hash.h"

/* Smallest power of two >= value (returns 1 for value <= 1). */
static HYPRE_Int NearestPowerOfTwo( HYPRE_Int value )
{
  HYPRE_Int rc = 1;
  while (rc < value)
  {
    rc <<= 1;
  }
  return rc;
}

/* Reset a bucket to the empty state. */
static void InitBucket(hypre_HopscotchBucket *b)
{
  b->hopInfo = 0;
  b->hash = HYPRE_HOPSCOTCH_HASH_EMPTY;
}

/* Reset a big-int bucket to the empty state. */
static void InitBigBucket(hypre_BigHopscotchBucket *b)
{
  b->hopInfo = 0;
  b->hash = HYPRE_HOPSCOTCH_HASH_EMPTY;
}

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* Initialize a concurrency segment: zero timestamp and a fresh OpenMP lock. */
static void InitSegment(hypre_HopscotchSegment *s)
{
  s->timestamp = 0;
  omp_init_lock(&s->lock);
}

/* Release the OpenMP lock owned by a segment. */
static void DestroySegment(hypre_HopscotchSegment *s)
{
  omp_destroy_lock(&s->lock);
}
#endif

/*
 * Create an unordered set of HYPRE_Int.
 * inCapacity is a hint for the initial capacity; concurrencyLevel sizes the
 * segment (lock) array.  Capacity is rounded up to a power of two (plus the
 * hopscotch insert range) so that bucketMask/segmentMask can be used as
 * bit masks instead of modulo.
 */
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
                                  HYPRE_Int inCapacity,
                                  HYPRE_Int concurrencyLevel)
{
  /* One segment per (power-of-two) concurrency unit; mask form for indexing. */
  s->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
  if (inCapacity < s->segmentMask + 1)
  {
    inCapacity = s->segmentMask + 1;
  }

  //ADJUST INPUT ............................
  HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity+4096);
  /* Extra INSERT_RANGE + 1 buckets let probes run past the last home bucket. */
  HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
  s->bucketMask = adjInitCap - 1;

  HYPRE_Int i;

  //ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  s->segments = hypre_TAlloc(hypre_HopscotchSegment, s->segmentMask + 1, HYPRE_MEMORY_HOST);
  for (i = 0; i <= s->segmentMask; ++i)
  {
    InitSegment(&s->segments[i]);
  }
#endif

  s->hopInfo = hypre_TAlloc(hypre_uint, num_buckets, HYPRE_MEMORY_HOST);
  s->key = hypre_TAlloc(HYPRE_Int, num_buckets, HYPRE_MEMORY_HOST);
  s->hash = hypre_TAlloc(HYPRE_Int, num_buckets, HYPRE_MEMORY_HOST);

  /* Mark every bucket empty; keys need no init until a bucket is occupied. */
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
  for (i = 0; i < num_buckets; ++i)
  {
    s->hopInfo[i] = 0;
    s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;
  }
}

/*
 * Create an unordered set of HYPRE_BigInt.  Same layout and sizing policy as
 * hypre_UnorderedIntSetCreate, with big-int key/hash arrays.
 */
void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s,
                                     HYPRE_Int inCapacity,
                                     HYPRE_Int concurrencyLevel)
{
  s->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
  if (inCapacity < s->segmentMask + 1)
  {
    inCapacity = s->segmentMask + 1;
  }

  //ADJUST INPUT ............................
  HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity+4096);
  HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
  s->bucketMask = adjInitCap - 1;

  HYPRE_Int i;

  //ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  s->segments = hypre_TAlloc(hypre_HopscotchSegment, s->segmentMask + 1, HYPRE_MEMORY_HOST);
  for (i = 0; i <= s->segmentMask; ++i)
  {
    InitSegment(&s->segments[i]);
  }
#endif

  s->hopInfo = hypre_TAlloc(hypre_uint, num_buckets, HYPRE_MEMORY_HOST);
  s->key = hypre_TAlloc(HYPRE_BigInt, num_buckets, HYPRE_MEMORY_HOST);
  s->hash = hypre_TAlloc(HYPRE_BigInt, num_buckets, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
  for (i = 0; i < num_buckets; ++i)
  {
    s->hopInfo[i] = 0;
    s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;
  }
}

/*
 * Create an unordered int->int map.  Sizing policy mirrors the set variants;
 * storage is an array of hypre_HopscotchBucket instead of parallel arrays.
 */
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
                                  HYPRE_Int inCapacity,
                                  HYPRE_Int concurrencyLevel)
{
  m->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
  if (inCapacity < m->segmentMask + 1)
  {
    inCapacity = m->segmentMask + 1;
  }

  //ADJUST INPUT ............................
  HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity+4096);
  HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
  m->bucketMask = adjInitCap - 1;

  HYPRE_Int i;

  //ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  m->segments = hypre_TAlloc(hypre_HopscotchSegment, m->segmentMask + 1, HYPRE_MEMORY_HOST);
  for (i = 0; i <= m->segmentMask; i++)
  {
    InitSegment(&m->segments[i]);
  }
#endif

  m->table = hypre_TAlloc(hypre_HopscotchBucket, num_buckets, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
  for (i = 0; i < num_buckets; i++)
  {
    InitBucket(&m->table[i]);
  }
}

/*
 * Create an unordered big-int map.  Same as hypre_UnorderedIntMapCreate with
 * hypre_BigHopscotchBucket storage.
 */
void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m,
                                     HYPRE_Int inCapacity,
                                     HYPRE_Int concurrencyLevel)
{
  m->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
  if (inCapacity < m->segmentMask + 1)
  {
    inCapacity = m->segmentMask + 1;
  }

  //ADJUST INPUT ............................
  HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity+4096);
  HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
  m->bucketMask = adjInitCap - 1;

  HYPRE_Int i;

  //ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  m->segments = hypre_TAlloc(hypre_HopscotchSegment, m->segmentMask + 1, HYPRE_MEMORY_HOST);
  for (i = 0; i <= m->segmentMask; i++)
  {
    InitSegment(&m->segments[i]);
  }
#endif

  m->table = hypre_TAlloc(hypre_BigHopscotchBucket, num_buckets, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
  for (i = 0; i < num_buckets; i++)
  {
    InitBigBucket(&m->table[i]);
  }
}

/* Free all storage owned by an int set (arrays, then segments/locks). */
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s )
{
  hypre_TFree(s->hopInfo, HYPRE_MEMORY_HOST);
  hypre_TFree(s->key, HYPRE_MEMORY_HOST);
  hypre_TFree(s->hash, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  HYPRE_Int i;
  for (i = 0; i <= s->segmentMask; i++)
  {
    DestroySegment(&s->segments[i]);
  }
  hypre_TFree(s->segments, HYPRE_MEMORY_HOST);
#endif
}

/* Free all storage owned by a big-int set. */
void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s )
{
  hypre_TFree(s->hopInfo, HYPRE_MEMORY_HOST);
  hypre_TFree(s->key, HYPRE_MEMORY_HOST);
  hypre_TFree(s->hash, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  HYPRE_Int i;
  for (i = 0; i <= s->segmentMask; i++)
  {
    DestroySegment(&s->segments[i]);
  }
  hypre_TFree(s->segments, HYPRE_MEMORY_HOST);
#endif
}

/* Free all storage owned by an int map. */
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m)
{
  hypre_TFree(m->table, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  HYPRE_Int i;
  for (i = 0; i <= m->segmentMask; i++)
  {
    DestroySegment(&m->segments[i]);
  }
  hypre_TFree(m->segments, HYPRE_MEMORY_HOST);
#endif
}

/* Free all storage owned by a big-int map. */
void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m)
{
  hypre_TFree(m->table, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
  HYPRE_Int i;
  for (i = 0; i <= m->segmentMask; i++)
  {
    DestroySegment(&m->segments[i]);
  }
  hypre_TFree(m->segments, HYPRE_MEMORY_HOST);
#endif
}

/*
 * Copy the live keys of the set into a newly allocated array; *len receives
 * the key count.  Two passes per thread: count occupied buckets, prefix-sum
 * the counts to assign each thread an output offset, then copy the keys.
 * Caller frees the returned array.
 */
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len )
{
  /*HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];*/
  HYPRE_Int *prefix_sum_workspace;
  HYPRE_Int *ret_array = NULL;

  prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel
#endif
  {
    HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
    HYPRE_Int i_begin, i_end;
    hypre_GetSimpleThreadPartition(&i_begin, &i_end, n);

    /* Pass 1: count this thread's occupied buckets. */
    HYPRE_Int cnt = 0;
    HYPRE_Int i;
    for (i = i_begin; i < i_end; i++)
    {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) cnt++;
    }

    /* After this, cnt is this thread's starting offset; *len is the total. */
    hypre_prefix_sum(&cnt, len, prefix_sum_workspace);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#pragma omp master
#endif
    {
      ret_array = hypre_TAlloc(HYPRE_Int, *len, HYPRE_MEMORY_HOST);
    }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#endif

    /* Pass 2: copy the keys into the slots assigned by the prefix sum. */
    for (i = i_begin; i < i_end; i++)
    {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) ret_array[cnt++] = s->key[i];
    }
  }

  hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
  return ret_array;
}

/*
 * Big-int variant of hypre_UnorderedIntSetCopyToArray; identical algorithm
 * with a HYPRE_BigInt result array.
 */
HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len )
{
  /*HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];*/
  HYPRE_Int *prefix_sum_workspace;
  HYPRE_BigInt *ret_array = NULL;

  prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel
#endif
  {
    HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
    HYPRE_Int i_begin, i_end;
    hypre_GetSimpleThreadPartition(&i_begin, &i_end, n);

    /* Pass 1: count this thread's occupied buckets. */
    HYPRE_Int cnt = 0;
    HYPRE_Int i;
    for (i = i_begin; i < i_end; i++)
    {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) cnt++;
    }

    hypre_prefix_sum(&cnt, len, prefix_sum_workspace);

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#pragma omp master
#endif
    {
      ret_array = hypre_TAlloc(HYPRE_BigInt, *len, HYPRE_MEMORY_HOST);
    }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#endif

    /* Pass 2: copy the keys into the slots assigned by the prefix sum. */
    for (i = i_begin; i < i_end; i++)
    {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) ret_array[cnt++] = s->key[i];
    }
  }

  hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
  return ret_array;
}
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; class OpenMPIRBuilder; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. 
class PrePostActionTy {
public:
  explicit PrePostActionTy() {}
  /// Invoked before the wrapped region is emitted.
  virtual void Enter(CodeGenFunction &CGF) {}
  /// Invoked after the wrapped region is emitted.
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() {}
};

/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  /// Type-erased address of the wrapped callable.
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  /// Trampoline that restores the callable's type and invokes it.
  CodeGenTy Callback;
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  /// Casts \p CodeGen back to Callable and forwards the call to it.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Wraps any callable except RegionCodeGenTy itself (SFINAE guard keeps
  /// this constructor from hijacking the copy constructor).
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  void operator()(CodeGenFunction &CGF) const;
};

/// Collected clause expressions and flags used when emitting a task-based
/// directive.
struct OMPTaskDataTy final {
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  /// One depend clause: its kind, optional iterator, and depend expressions.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};

/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;
  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;

  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivateAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns LValue for the original reduction item.
  LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the reference expression of the reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};

class CGOpenMPRuntime {
public:
  /// Allows to disable automatic handling of functions used in target regions
  /// as those marked as `omp declare target`.
  class DisableAutoDeclareTargetRAII {
    CodeGenModule &CGM;
    bool SavedShouldMarkAsGlobal;

  public:
    DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
    ~DisableAutoDeclareTargetRAII();
  };

  /// Manages list of nontemporal decls for the specified directive.
  class NontemporalDeclsRAII {
    CodeGenModule &CGM;
    const bool NeedToPush;

  public:
    NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
    ~NontemporalDeclsRAII();
  };

  /// Manages list of untied-task local decls for the specified directive.
class UntiedTaskLocalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: UntiedTaskLocalDeclsRAII( CodeGenFunction &CGF, const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>> &LocalVars); ~UntiedTaskLocalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. Only private copies are captured but we need also to /// store private copy in shared address. /// Also, stores the expression for the private loop counter and it /// threaprivate name. struct LastprivateConditionalData { llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>> DeclToUniqueName; LValue IVLVal; llvm::Function *Fn = nullptr; bool Disabled = false; }; /// Manages list of lastprivate conditional decls for the specified directive. class LastprivateConditionalRAII { enum class ActionToDo { DoNotPush, PushAsLastprivateConditional, DisableLastprivateConditional, }; CodeGenModule &CGM; ActionToDo Action = ActionToDo::DoNotPush; /// Check and try to disable analysis of inner regions for changes in /// lastprivate conditional. void tryToDisableInnerAnalysis(const OMPExecutableDirective &S, llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled) const; LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S); public: explicit LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal); static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S); ~LastprivateConditionalRAII(); }; llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; } protected: CodeGenModule &CGM; StringRef FirstSeparator, Separator; /// An OpenMP-IR-Builder instance. llvm::OpenMPIRBuilder OMPBuilder; /// Constructor allowing to redefine the name separator for the variables. 
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator); /// Creates offloading entry for the provided entry ID \a ID, /// address \a Addr, size \a Size, and flags \a Flags. virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Helper to emit outlined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Lambda codegen specific to an accelerator device. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emits object of ident_t type with info for source location. /// \param Flags Flags for OpenMP location. /// llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags = 0); /// Returns pointer to ident_t type. llvm::Type *getIdentTyPointerTy(); /// Gets thread id value for the current thread. /// llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc); /// Get the function name of an outlined region. // The name can be customized depending on the target. // virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; } /// Emits \p Callee function call with arguments \p Args with location \p Loc. 
void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits address of the word in a memory where current thread id is /// stored. virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc); void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint = false); void clearLocThreadIdInsertPt(CodeGenFunction &CGF); /// Check if the default location must be constant. /// Default is false to support OMPT/OMPD. virtual bool isDefaultLocationConstant() const { return false; } /// Returns additional flags that can be stored in reserved_2 field of the /// default location. virtual unsigned getDefaultLocationReserved2Flags() const { return 0; } /// Returns default flags for the barriers depending on the directive, for /// which this barier is going to be emitted. static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind); /// Get the LLVM type for the critical name. llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;} /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// llvm::Value *getCriticalRegionLock(StringRef CriticalName); private: /// Map for SourceLocation and OpenMP runtime library debug locations. typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy; OpenMPDebugLocMapTy OpenMPDebugLocMap; /// The type for a microtask which gets passed to __kmpc_fork_call(). /// Original representation is: /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...); llvm::FunctionType *Kmpc_MicroTy = nullptr; /// Stores debug location and ThreadID for the function. struct DebugLocThreadIdTy { llvm::Value *DebugLoc; llvm::Value *ThreadID; /// Insert point for the service instructions. 
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr; }; /// Map of local debug location, ThreadId and functions. typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy> OpenMPLocThreadIDMapTy; OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap; /// Map of UDRs and corresponding combiner/initializer. typedef llvm::DenseMap<const OMPDeclareReductionDecl *, std::pair<llvm::Function *, llvm::Function *>> UDRMapTy; UDRMapTy UDRMap; /// Map of functions and locally defined UDRs. typedef llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareReductionDecl *, 4>> FunctionUDRMapTy; FunctionUDRMapTy FunctionUDRMap; /// Map from the user-defined mapper declaration to its corresponding /// functions. llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap; /// Map of functions and their local user-defined mappers. using FunctionUDMMapTy = llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareMapperDecl *, 4>>; FunctionUDMMapTy FunctionUDMMap; /// Maps local variables marked as lastprivate conditional to their internal /// types. llvm::DenseMap<llvm::Function *, llvm::DenseMap<CanonicalDeclPtr<const Decl>, std::tuple<QualType, const FieldDecl *, const FieldDecl *, LValue>>> LastprivateConditionalToTypes; /// Maps function to the position of the untied task locals stack. llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap; /// Type kmp_critical_name, originally defined as typedef kmp_int32 /// kmp_critical_name[8]; llvm::ArrayType *KmpCriticalNameTy; /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. 
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator> InternalVars; /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); llvm::Type *KmpRoutineEntryPtrTy = nullptr; QualType KmpRoutineEntryPtrQTy; /// Type typedef struct kmp_task { /// void * shareds; /**< pointer to block of pointers to /// shared vars */ /// kmp_routine_entry_t routine; /**< pointer to routine to call for /// executing task */ /// kmp_int32 part_id; /**< part id for the task */ /// kmp_routine_entry_t destructors; /* pointer to function to invoke /// deconstructors of firstprivate C++ objects */ /// } kmp_task_t; QualType KmpTaskTQTy; /// Saved kmp_task_t for task directive. QualType SavedKmpTaskTQTy; /// Saved kmp_task_t for taskloop-based directive. QualType SavedKmpTaskloopTQTy; /// Type typedef struct kmp_depend_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool in:1; /// bool out:1; /// } flags; /// } kmp_depend_info_t; QualType KmpDependInfoTy; /// Type typedef struct kmp_task_affinity_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool flag1 : 1; /// bool flag2 : 1; /// kmp_int32 reserved : 30; /// } flags; /// } kmp_task_affinity_info_t; QualType KmpTaskAffinityInfoTy; /// struct kmp_dim { // loop bounds info casted to kmp_int64 /// kmp_int64 lo; // lower /// kmp_int64 up; // upper /// kmp_int64 st; // stride /// }; QualType KmpDimTy; /// Type struct __tgt_offload_entry{ /// void *addr; // Pointer to the offload entry info. /// // (function or global) /// char *name; // Name of the function or global. /// size_t size; // Size of the entry info (0 if it a function). /// int32_t flags; /// int32_t reserved; /// }; QualType TgtOffloadEntryQTy; /// Entity that registers the offloading constants that were emitted so /// far. class OffloadEntriesInfoManagerTy { CodeGenModule &CGM; /// Number of entries registered so far. unsigned OffloadingEntriesNum = 0; public: /// Base class of the entries info. 
class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable. OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if a there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. 
OMPTargetRegionEntryDtor = 0x04, }; /// Target region entries info. class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo { /// Address that can be used as the ID of the entry. llvm::Constant *ID = nullptr; public: OffloadEntryInfoTargetRegion() : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {} explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags), ID(ID) { setAddress(Addr); } llvm::Constant *getID() const { return ID; } void setID(llvm::Constant *V) { assert(!ID && "ID has been set before!"); ID = V; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoTargetRegion; } }; /// Initialize target region entry. void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, unsigned Order); /// Register target region entry. void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags); /// Return true if a target region entry with the provided information /// exists. bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, bool IgnoreAddressId = false) const; /// brief Applies action \a Action on all registered entries. typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy; void actOnTargetRegionEntriesInfo( const OffloadTargetRegionEntryInfoActTy &Action); // // Device global variable entries related. // /// Kind of the global variable entry.. enum OMPTargetGlobalVarEntryKind : uint32_t { /// Mark the entry as a to declare target. OMPTargetGlobalVarEntryTo = 0x0, /// Mark the entry as a to declare target link. 
OMPTargetGlobalVarEntryLink = 0x1, }; /// Device global variable entries info. class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo { /// Type of the global variable. CharUnits VarSize; llvm::GlobalValue::LinkageTypes Linkage; public: OffloadEntryInfoDeviceGlobalVar() : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {} explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {} explicit OffloadEntryInfoDeviceGlobalVar( unsigned Order, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags), VarSize(VarSize), Linkage(Linkage) { setAddress(Addr); } CharUnits getVarSize() const { return VarSize; } void setVarSize(CharUnits Size) { VarSize = Size; } llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; } void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar; } }; /// Initialize device global variable entry. void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order); /// Register device global variable entry. void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Checks if the variable with the given name has been registered already. bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const { return OffloadEntriesDeviceGlobalVar.count(VarName) > 0; } /// Applies action \a Action on all registered entries. 
typedef llvm::function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy; void actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action); private: // Storage for target region entries kind. The storage is to be indexed by // file ID, device ID, parent function name and line number. typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion> OffloadEntriesTargetRegionPerLine; typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine> OffloadEntriesTargetRegionPerParentName; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName> OffloadEntriesTargetRegionPerFile; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile> OffloadEntriesTargetRegionPerDevice; typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy; OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion; /// Storage for device global variable entries kind. The storage is to be /// indexed by mangled name. typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar> OffloadEntriesDeviceGlobalVarTy; OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar; }; OffloadEntriesInfoManagerTy OffloadEntriesInfoManager; bool ShouldMarkAsGlobal = true; /// List of the emitted declarations. llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls; /// List of the global variables with their addresses that should not be /// emitted for the target. llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables; /// List of variables that can become declare target implicitly and, thus, /// must be emitted. llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables; using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>; /// Stack for list of declarations in current context marked as nontemporal. /// The set is the union of all current stack elements. 
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
using UntiedLocalVarsAddressesMap =
    llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
                   std::pair<Address, Address>>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the device. (NOTE(review): the original
/// comment was truncated here.)
bool HasEmittedDeclareTargetRegion = false;

/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();

/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();

/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);

/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);

/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();

/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
                                                 bool IVSigned);

/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
                                                bool IVSigned);

/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);

/// Gets (if variable with the given name already exists) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
                                            const llvm::Twine &Name,
                                            unsigned AddressSpace = 0);

/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;

/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;

/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc); /// Emit the array initialization or deletion portion for user-defined mapper /// code generation. void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *BasePtr, llvm::Value *Ptr, llvm::Value *Size, llvm::Value *MapType, CharUnits ElementSize, llvm::BasicBlock *ExitBB, bool IsInit); struct TaskResultTy { llvm::Value *NewTask = nullptr; llvm::Function *TaskEntry = nullptr; llvm::Value *NewTaskNewTaskTTy = nullptr; LValue TDBase; const RecordDecl *KmpTaskTQTyRD = nullptr; llvm::Value *TaskDupFn = nullptr; }; /// Emit task region for the task directive. The task region is emitted in /// several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. 
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const OMPTaskDataTy &Data);

/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }

/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    llvm::Value *DeviceID,
    llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                     const OMPLoopDirective &D)>
        SizeEmitter);

/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
                                      StringRef UniqueDeclName, LValue LVal,
                                      SourceLocation Loc);

/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
                                                   LValue DepobjLVal,
                                                   SourceLocation Loc);

public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();

/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
///   ThenGen();
/// } else {
///   ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
                  const RegionCodeGenTy &ThenGen,
                  const RegionCodeGenTy &ElseGen);

/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);

/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;

/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
                                      const OMPDeclareReductionDecl *D);

/// Get combiner/initializer for the specified user-defined reduction, if
/// any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);

/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
                           CodeGenFunction *CGF = nullptr);

/// Get the function for the specified user-defined mapper. If it does not
/// exist, create one.
llvm::Function *
getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);

/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);

/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts); /// Cleans up references to the objects in finished function. /// virtual void functionFinished(CodeGenFunction &CGF); /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. 
/// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr); /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc); /// Emits code for a taskyield directive. virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc); /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc); /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps); /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads); /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. 
/// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false); /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of distribute directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static chunked. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. 
/// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. 
bool Ordered = false; /// Address of the output variable in which the flag of the last iteration /// is returned. Address IL = Address::invalid(); /// Address of the output variable in which the lower iteration number is /// returned. Address LB = Address::invalid(); /// Address of the output variable in which the upper iteration number is /// returned. Address UB = Address::invalid(); /// Address of the output variable in which the stride value is returned /// necessary to generated the static_chunked scheduled loop. Address ST = Address::invalid(); /// Value of the chunk for the static_chunked scheduled loop. For the /// default (nullptr) value, the chunk 1 will be used. llvm::Value *Chunk = nullptr; StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL, Address LB, Address UB, Address ST, llvm::Value *Chunk = nullptr) : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB), UB(UB), ST(ST), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values); /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. 
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
                                      SourceLocation Loc,
                                      OpenMPDistScheduleClauseKind SchedKind,
                                      const StaticRTInput &Values);

/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                        SourceLocation Loc, unsigned IVSize,
                                        bool IVSigned);

/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
                                 OpenMPDirectiveKind DKind);

/// Call __kmpc_dispatch_next(
///          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
///          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
///          kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
                                 unsigned IVSize, bool IVSigned, Address IL,
                                 Address LB, Address UB, Address ST);

/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
                                  llvm::Value *NumThreads,
                                  SourceLocation Loc);

/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
                                llvm::omp::ProcBindKind ProcBind,
                                SourceLocation Loc);

/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                       const VarDecl *VD, Address VDAddr,
                                       SourceLocation Loc);

/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);

/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
                               SourceLocation Loc, bool PerformInit,
                               CodeGenFunction *CGF = nullptr);

/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit);

/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
                                                 QualType VarType,
                                                 StringRef Name);

/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
                       SourceLocation Loc, llvm::AtomicOrdering AO);

/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const Expr *IfCond,
                          const OMPTaskDataTy &Data);

/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPLoopDirective &D,
                              llvm::Function *TaskFunction,
                              QualType SharedsTy, Address Shareds,
                              const Expr *IfCond, const OMPTaskDataTy &Data);

/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
                                  OpenMPDirectiveKind InnermostKind,
                                  const RegionCodeGenTy &CodeGen,
                                  bool HasCancel = false);

/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
                                      llvm::Type *ArgsType,
                                      ArrayRef<const Expr *> Privates,
                                      ArrayRef<const Expr *> LHSExprs,
                                      ArrayRef<const Expr *> RHSExprs,
                                      ArrayRef<const Expr *> ReductionOps);

/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
                                 const Expr *ReductionOp,
                                 const Expr *PrivateRef,
                                 const DeclRefExpr *LHS,
                                 const DeclRefExpr *RHS);

/// Options controlling how a reduction clause is emitted (see the
/// \a emitReduction documentation below for the meaning of each field).
struct ReductionOptionsTy {
  /// True if the parent directive has also a nowait clause, false otherwise
  /// (selects the __kmpc_reduce{_nowait} entry points shown below).
  bool WithNowait;
  /// Emit the reduction operation only. Used for omp simd directive on the
  /// host.
  bool SimpleReduction;
  /// The kind of reduction to perform.
  OpenMPDirectiveKind ReductionKind;
};

/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options); /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. 
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           ArrayRef<const Expr *> LHSExprs,
                                           ArrayRef<const Expr *> RHSExprs,
                                           const OMPTaskDataTy &Data);

/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                                   bool IsWorksharingReduction);

/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                                     ReductionCodeGen &RCG, unsigned N);

/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                                     llvm::Value *ReductionsPtr,
                                     LValue SharedLVal);

/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);

/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       OpenMPDirectiveKind CancelRegion);

/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
/// virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion); /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. /// \param SizeEmitter Callback to emit number of iterations for loop-based /// directives. 
virtual void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. virtual bool emitTargetFunctions(GlobalDecl GD); /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. virtual bool emitTargetGlobalVariable(GlobalDecl GD); /// Checks if the provided global decl \a GD is a declare target variable and /// registers it when emitting code for the host. virtual void registerTargetGlobalVariable(const VarDecl *VD, llvm::Constant *Addr); /// Registers provided target firstprivate variable as global on the /// target. llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF, const VarDecl *VD); /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. virtual bool emitTargetGlobal(GlobalDecl GD); /// Creates and returns a registration function for when at least one /// requires directives was used in the current module. llvm::Function *emitRequiresDirectiveRegFun(); /// Creates all the offload entries in the current compilation unit /// along with the associated metadata. void createOffloadEntriesAndInfoMetadata(); /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. 
Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// virtual void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars); /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc); /// Struct that keeps all the relevant information that should be kept /// throughout a 'target data' region. class TargetDataInfo { /// Set to true if device pointer information have to be obtained. bool RequiresDevicePointerInfo = false; /// Set to true if Clang emits separate runtime calls for the beginning and /// end of the region. These calls might have separate map type arrays. bool SeparateBeginEndCalls = false; public: /// The array of base pointer passed to the runtime library. llvm::Value *BasePointersArray = nullptr; /// The array of section pointers passed to the runtime library. llvm::Value *PointersArray = nullptr; /// The array of sizes passed to the runtime library. llvm::Value *SizesArray = nullptr; /// The array of map types passed to the runtime library for the beginning /// of the region or for the entire region if there are no separate map /// types for the region end. llvm::Value *MapTypesArray = nullptr; /// The array of map types passed to the runtime library for the end of the /// region, or nullptr if there are no separate map types for the region /// end. llvm::Value *MapTypesArrayEnd = nullptr; /// The array of user-defined mappers passed to the runtime library. 
llvm::Value *MappersArray = nullptr; /// The array of original declaration names of mapped pointers sent to the /// runtime library for debugging llvm::Value *MapNamesArray = nullptr; /// Indicate whether any user-defined mapper exists. bool HasMapper = false; /// The total number of pointers passed to the runtime library. unsigned NumberOfPtrs = 0u; /// Map between the a declaration of a capture and the corresponding base /// pointer address where the runtime returns the device pointers. llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap; explicit TargetDataInfo() {} explicit TargetDataInfo(bool RequiresDevicePointerInfo, bool SeparateBeginEndCalls) : RequiresDevicePointerInfo(RequiresDevicePointerInfo), SeparateBeginEndCalls(SeparateBeginEndCalls) {} /// Clear information about the data arrays. void clearArrayInfo() { BasePointersArray = nullptr; PointersArray = nullptr; SizesArray = nullptr; MapTypesArray = nullptr; MapTypesArrayEnd = nullptr; MapNamesArray = nullptr; MappersArray = nullptr; HasMapper = false; NumberOfPtrs = 0u; } /// Return true if the current target data information has valid arrays. bool isValid() { return BasePointersArray && PointersArray && SizesArray && MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs; } bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; } bool separateBeginEndCalls() { return SeparateBeginEndCalls; } }; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. 
virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info); /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device); /// Marks function \a Fn with properly mangled versions of vector functions. /// \param FD Function marked as 'declare simd'. /// \param Fn LLVM function that must be marked with 'declare simd' /// attributes. virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn); /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations); /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C); /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. virtual const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { return NativeParam; } /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. 
/// \param TargetParam Corresponding target-specific parameter. virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const; /// Choose default schedule type and chunk value for the /// dist_schedule clause. virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {} /// Choose default schedule type and chunk value for the /// schedule clause. virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const; /// Emits call of the outlined function with the provided arguments, /// translating these arguments to correct target-specific arguments. virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits OpenMP-specific function prolog. /// Required for device constructs. virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D); /// Gets the OpenMP-specific address of the local variable. virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD); /// Marks the declaration as already emitted for the device code and returns /// true, if it was marked already, and false, otherwise. bool markAsGlobalTarget(GlobalDecl GD); /// Emit deferred declare target variables marked for deferred emission. void emitDeferredTargetDecls() const; /// Adjust some parameters for the target-based directives, like addresses of /// the variables captured by reference in lambdas. 
virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const; /// Perform check on requires decl to ensure that target architecture /// supports unified addressing virtual void processRequiresDirective(const OMPRequiresDecl *D); /// Gets default memory ordering as specified in requires directive. llvm::AtomicOrdering getDefaultMemoryOrdering() const; /// Checks if the variable has associated OMPAllocateDeclAttr attribute with /// the predefined allocator and translates it into the corresponding address /// space. virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS); /// Return whether the unified_shared_memory has been specified. bool hasRequiresUnifiedSharedMemory() const; /// Checks if the \p VD variable is marked as nontemporal declaration in /// current context. bool isNontemporalDecl(const ValueDecl *VD) const; /// Create specialized alloca to handle lastprivate conditionals. Address emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD); /// Checks if the provided \p LVal is lastprivate conditional and emits the /// code to update the value of the original variable. /// \code /// lastprivate(conditional: a) /// ... /// <type> a; /// lp_a = ...; /// #pragma omp critical(a) /// if (last_iv_a <= iv) { /// last_iv_a = iv; /// global_a = lp_a; /// } /// \endcode virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS); /// Checks if the lastprivate conditional was updated in inner region and /// writes the value. /// \code /// lastprivate(conditional: a) /// ... /// <type> a;bool Fired = false; /// #pragma omp ... 
/// shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);

/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
                                                   LValue PrivLVal,
                                                   const VarDecl *VD,
                                                   SourceLocation Loc);

/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
                 ArrayRef<OMPTaskDataTy::DependData> Dependencies,
                 SourceLocation Loc);

/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
                               const OMPTaskDataTy::DependData &Dependencies,
                               SourceLocation Loc);

/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                       SourceLocation Loc);

/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                      OpenMPDependClauseKind NewDepKind, SourceLocation Loc);

/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
                            const Expr *AllocatorTraits);

/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);

/// Returns true if the variable is a local variable in untied task.
bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};

/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
  explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
  ~CGOpenMPSIMDRuntime() override {}

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
                               const VarDecl *ThreadIDVar,
                               OpenMPDirectiveKind InnermostKind,
                               const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override; /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) override; /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) override; /// Emits a critical region. 
/// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr) override; /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override; /// Emits code for a taskyield directive. void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override; /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) override; /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override; /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. 
/// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false) override; /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. 
/// \param Values Input arguments for the construct. /// void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override; /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override; /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override; /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override; /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. 
/// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override; /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override; /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override; /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override; /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO) override; /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. 
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. 
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override; /// Emit a code for initialization of task reduction clause. 
Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) override; /// Emits the following code for reduction clause with task modifier: /// \code /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing); /// \endcode void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) override; /// Required to resolve existing problems in the runtime. 
Emits threadprivate /// variables to store the size of the VLAs/array sections for /// initializer/combiner/finalizer functions + emits threadprivate variable to /// store the pointer to the original reduction item for the custom /// initializer defined by declare reduction construct. /// \param RCG Allows to reuse an existing data for the reductions. /// \param N Reduction item for which fixups must be emitted. void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) override; /// Get the address of `void *` type of the privatue copy of the reduction /// item specified by the \p SharedLVal. /// \param ReductionsPtr Pointer to the reduction data returned by the /// emitTaskReductionInit function. /// \param SharedLVal Address of the original reduction item. Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) override; /// Emit code for 'taskwait' directive. void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit code for 'cancellation point' construct. /// \param CancelRegion Region kind for which the cancellation point must be /// emitted. /// void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) override; /// Emit code for 'cancel' construct. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// \param CancelRegion Region kind for which the cancel must be emitted. /// void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override; /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. 
/// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override; /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. 
bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. 
void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. 
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                  const VarDecl *VD) override {
  // Returning Address::invalid() signals that this runtime provides no
  // OpenMP-specific storage for the local; the caller falls back to its
  // default local-variable emission.
  return Address::invalid();
}
};

} // namespace CodeGen
} // namespace clang

#endif
GB_unaryop__ainv_bool_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_int16 // op(A') function: GB_tran__ainv_bool_int16 // C type: bool // A type: int16_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_int16 ( bool *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
main.c
//
//  main.c
//  openmp_parallel
//
//  Created by Vicente Cubells Nonell on 27/10/14.
//  Copyright (c) 2014 Vicente Cubells Nonell. All rights reserved.
//

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Sums an array of n ones in parallel, partitioning the index range by hand
 * across the team and accumulating into a shared total.
 *
 * Fixes over the previous revision:
 *  - the last thread now also processes the n % nhilos leftover elements,
 *    which the old partition silently dropped whenever the team size did
 *    not divide n evenly;
 *  - each thread accumulates into a private partial sum and enters the
 *    critical section once, instead of locking per element;
 *  - the 4 MB array lives on the heap instead of a stack VLA.
 */
int main(int argc, const char * argv[])
{
    int n = 1000000;
    int suma = 0;
    int i;

    int *numeros = (int *) malloc((size_t) n * sizeof *numeros);
    if (numeros == NULL)
    {
        fprintf(stderr, "Error: no se pudo reservar memoria\n");
        return 1;
    }

    for (i = 0; i < n; ++i)
    {
        numeros[i] = 1;
    }

    #pragma omp parallel default(none) shared(n, numeros, suma) private(i) num_threads(8) if (n > 100)
    {
        int nhilos = omp_get_num_threads();
        int id = omp_get_thread_num();
        int bloque = n / nhilos;
        int inicio = id * bloque;
        /* The last thread extends its range to n so that the remainder
         * (n % nhilos) is not lost. */
        int fin = (id == nhilos - 1) ? n : (id + 1) * bloque;

        /* Private accumulation: one critical section per thread instead of
         * one per element. */
        int parcial = 0;
        for (i = inicio; i < fin; ++i)
        {
            parcial += numeros[i];
        }

        // Región crítica
        #pragma omp critical
        {
            suma += parcial;
        }

        /* Note: suma may still grow after this read if other threads have
         * not yet added their partial sums; the printed value is a snapshot. */
        printf("La suma del hilo %d de %d = %d\n", id, nhilos, suma);
    }

    printf("La suma total es = %d\n", suma);

    free(numeros);
    return 0;
}
GB_unaryop__abs_int8_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int8_uint64
// op(A') function:  GB_tran__abs_int8_uint64

// C type:   int8_t
// A type:   uint64_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = GB_IABS (aij)

// type of the entries of the input matrix A
#define GB_ATYPE \
    uint64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// access the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator: integer absolute value
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting: note the cast to int8_t is applied before GB_IABS, per the
// generated GB_CAST_OP below
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = GB_IABS ((int8_t) Ax [p]) for p in [0, anz), split
// statically across nthreads OpenMP threads.  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB_unop__abs_int8_uint64
(
    int8_t *Cx,         // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the GB_unaryop_transpose.c template, which is
// textually included below and specialized by the GB_* macros defined above.
GrB_Info GB_tran__abs_int8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_1x1_pack4to1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 1x1 stride-1 convolution: a 1x1 kernel touches each input pixel exactly
// once, so the whole convolution collapses into a single matrix multiply.
// The spatial dims are flattened (w*h becomes the new width, h becomes 1)
// and the work is delegated to the im2col sgemm kernel.
static void conv1x1s1_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_pack4to1_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}

// 1x1 stride-2 convolution: first gathers every second pixel of every second
// row into a dense "shrinked" blob, then runs the stride-1 sgemm path above
// on it.  Data is bf16 stored as unsigned short, 4 values per pixel
// (elempack 4), hence the *4/*8 pointer strides below.
static void conv1x1s2_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Elements (in bf16 units) to skip at the end of each output row: the
    // unread tail of the current input row plus one full skipped input row.
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const unsigned short* r0 = bottom_blob.channel(p);
        unsigned short* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // Main loop: copy 4 output pixels (4 x 4 bf16 values) per
            // iteration; input advances 8 values per pixel (stride 2).
            for (; j + 3 < outw; j += 4)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0 + 8);
                uint16x4_t _v2 = vld1_u16(r0 + 16);
                uint16x4_t _v3 = vld1_u16(r0 + 24);
                uint16x8_t _v01 = vcombine_u16(_v0, _v1);
                uint16x8_t _v23 = vcombine_u16(_v2, _v3);
                vst1q_u16(outptr, _v01);
                vst1q_u16(outptr + 8, _v23);

                r0 += 32;
                outptr += 16;
            }
            // Tail: 2 pixels at a time.
            for (; j + 1 < outw; j += 2)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0 + 8);
                uint16x8_t _v = vcombine_u16(_v0, _v1);
                vst1q_u16(outptr, _v);

                r0 += 16;
                outptr += 8;
            }
            // Tail: single pixel.
            for (; j < outw; j++)
            {
                uint16x4_t _v = vld1_u16(r0);
                vst1_u16(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack4to1_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
taskConstruct.c
/*
 * Minimal OpenMP task data-sharing demo.
 *
 * Fix over the previous revision: the statement `localX = 12;` after the
 * parallel region referenced a variable that only exists inside that region,
 * which does not compile; it has been removed.
 */
int main()
{
    int x = 10;

    #pragma omp parallel
    {
        int localX = 9;

        /* localX is private to each thread of the parallel region, so it is
         * firstprivate by default inside the task: the task assigns to its
         * own copy, and that write is never observed by the code below. */
        #pragma omp task
        {
            localX = x;
        }

        localX = 11;
    }

    return 0;
}
GB_unop__identity_fp32_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp32_uint8)
// op(A') function:  GB (_unop_tran__identity_fp32_uint8)

// C type:   float
// A type:   uint8_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

// type of the entries of the input matrix A
#define GB_ATYPE \
    uint8_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// access the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator: identity (z = x)
#define GB_OP(z, x) \
    z = x ;

// casting from the A type to the C type
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = (float) Ax [p] for p in [0, anz).  When Ab is non-NULL the
// matrix is stored in bitmap form and positions with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// via GB_DISABLE.
GrB_Info GB (_unop_apply__identity_fp32_uint8)
(
    float *Cx,                  // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the GB_unop_transpose.c template, which is
// textually included below and specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp32_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
piOpenMP.c
#include <stdio.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int num_steps = 100000000;
double step;

/*
 * Wall-clock timestamp in seconds.
 *
 * The previous revision timed the parallel loop with clock(), which reports
 * processor time summed over all threads, so the printed "time" grew with
 * the thread count instead of measuring elapsed time.  omp_get_wtime()
 * returns wall time; clock() remains as a serial fallback when the code is
 * built without OpenMP.
 */
static double wall_seconds(void)
{
#ifdef _OPENMP
    return omp_get_wtime();
#else
    return (double) clock() / CLOCKS_PER_SEC;
#endif
}

/*
 * Computes pi by midpoint integration of 4/(1+x^2) over [0,1] using
 * num_steps rectangles, then prints the value and the elapsed wall time.
 */
int main(int argc, char* argv[])
{
    int i;
    double start_time, stop_time;
    double pi, sum = 0.0;

    step = 1. / (double) num_steps;
    start_time = wall_seconds();

    /* reduction(+:sum) gives each thread a private partial sum that is
     * combined once at the end; the per-iteration "omp atomic" it replaces
     * serialized every single addition. */
    #pragma omp parallel for reduction(+:sum)
    for (i = 0; i < num_steps; ++i) {
        double x = (i + .5) * step;
        sum += 4.0 / (1. + x * x);
    }

    pi = sum * step;
    stop_time = wall_seconds();

    printf("PIの値 %10.7f\n", pi);
    printf("PIの計算時間 %1f seconds\n", stop_time - start_time);

    return 0;
}
convolution_winograd_transform_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6x6,3x3) input transform for 8-element fp16 packed layout.
// Each 8x8 input tile (stride 6, 2-pixel halo) is transformed with the B^T
// matrix below, applied first along rows into tmp[][][] and then along
// columns into bottom_blob_tm.  One float16x8_t lane group = one channel pack.
static void conv3x3s1_winograd63_transform_input_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // per-tile scratch: [output row][input row][pack of 8 channels]
        __fp16 tmp[8][8][8];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8;

                // pass 1: transform each of the 8 input rows (vertical B^T)
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _r00 = vld1q_f16(r0);
                    float16x8_t _r01 = vld1q_f16(r0 + 8);
                    float16x8_t _r02 = vld1q_f16(r0 + 16);
                    float16x8_t _r03 = vld1q_f16(r0 + 24);
                    float16x8_t _r04 = vld1q_f16(r0 + 32);
                    float16x8_t _r05 = vld1q_f16(r0 + 40);
                    float16x8_t _r06 = vld1q_f16(r0 + 48);
                    float16x8_t _r07 = vld1q_f16(r0 + 56);

                    float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f);
                    float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f);
                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[7][m], _tmp7m);

                    float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f);
                    float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f);

                    float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b);
                    float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b);
                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[2][m], _tmp2m);

                    float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
                    float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b);
                    float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b);
                    vst1q_f16(tmp[3][m], _tmp3m);
                    vst1q_f16(tmp[4][m], _tmp4m);

                    float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f);
                    float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b);
                    float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b);
                    vst1q_f16(tmp[5][m], _tmp5m);
                    vst1q_f16(tmp[6][m], _tmp6m);

                    r0 += w * 8;
                }

                // output rows of the transformed tile live tiles*8 apart
                __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 8;
                __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
                __fp16* r0_tm_6 = r0_tm_0 + tiles * 48;
                __fp16* r0_tm_7 = r0_tm_0 + tiles * 56;

                // pass 2: transform the 8 columns of tmp (horizontal B^T)
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                    float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                    float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);

                    float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f);
                    float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f);

                    float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f);
                    float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f);

                    float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b);
                    float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b);

                    float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b);
                    float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b);

                    float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
                    float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b);
                    float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b);

                    vst1q_f16(r0_tm_0, _r0tm0);
                    vst1q_f16(r0_tm_1, _r0tm1);
                    vst1q_f16(r0_tm_2, _r0tm2);
                    vst1q_f16(r0_tm_3, _r0tm3);
                    vst1q_f16(r0_tm_4, _r0tm4);
                    vst1q_f16(r0_tm_5, _r0tm5);
                    vst1q_f16(r0_tm_6, _r0tm6);
                    vst1q_f16(r0_tm_7, _r0tm7);

                    r0_tm_0 += tiles * 64;
                    r0_tm_1 += tiles * 64;
                    r0_tm_2 += tiles * 64;
                    r0_tm_3 += tiles * 64;
                    r0_tm_4 += tiles * 64;
                    r0_tm_5 += tiles * 64;
                    r0_tm_6 += tiles * 64;
                    r0_tm_7 += tiles * 64;
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) output transform (A^T), 8-element fp16 packed layout.
// Each transformed 8x8 tile is reduced to a 6x6 output patch, bias added.
static void conv3x3s1_winograd63_transform_output_pack8_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const __fp16* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // per-channel-pack bias (zero when no bias tensor is given)
        float16x8_t _bias0 = biasptr ? vld1q_f16(biasptr + p * 8) : vdupq_n_f16(0.f);

        // per-tile scratch: [output row][transformed row][pack of 8]
        __fp16 tmp[6][8][8];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 8;
                const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
                const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
                const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
                const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
                const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;
                const __fp16* output0_tm_6 = output0_tm_0 + tiles * 48;
                const __fp16* output0_tm_7 = output0_tm_0 + tiles * 56;

                __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 8;

                // pass 1: vertical A^T over the 8 transformed rows
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
                    float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
                    float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
                    float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
                    float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
                    float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);
                    float16x8_t _out0tm6 = vld1q_f16(output0_tm_6);
                    float16x8_t _out0tm7 = vld1q_f16(output0_tm_7);

                    float16x8_t _tmp024a = vaddq_f16(_out0tm1, _out0tm2);
                    float16x8_t _tmp135a = vsubq_f16(_out0tm1, _out0tm2);

                    float16x8_t _tmp024b = vaddq_f16(_out0tm3, _out0tm4);
                    float16x8_t _tmp135b = vsubq_f16(_out0tm3, _out0tm4);

                    float16x8_t _tmp024c = vaddq_f16(_out0tm5, _out0tm6);
                    float16x8_t _tmp135c = vsubq_f16(_out0tm5, _out0tm6);

                    float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f));
                    float16x8_t _tmp2m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float16x8_t _tmp4m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[2][m], _tmp2m);
                    vst1q_f16(tmp[4][m], _tmp4m);

                    float16x8_t _tmp1m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float16x8_t _tmp3m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float16x8_t _tmp5m = vaddq_f16(vaddq_f16(_out0tm7, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f));
                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[3][m], _tmp3m);
                    vst1q_f16(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 64;
                    output0_tm_1 += tiles * 64;
                    output0_tm_2 += tiles * 64;
                    output0_tm_3 += tiles * 64;
                    output0_tm_4 += tiles * 64;
                    output0_tm_5 += tiles * 64;
                    output0_tm_6 += tiles * 64;
                    output0_tm_7 += tiles * 64;
                }

                // pass 2: horizontal A^T, add bias, store 6x6 output patch
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                    float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                    float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);

                    float16x8_t _tmp024a = vaddq_f16(_tmp01, _tmp02);
                    float16x8_t _tmp135a = vsubq_f16(_tmp01, _tmp02);

                    float16x8_t _tmp024b = vaddq_f16(_tmp03, _tmp04);
                    float16x8_t _tmp135b = vsubq_f16(_tmp03, _tmp04);

                    float16x8_t _tmp024c = vaddq_f16(_tmp05, _tmp06);
                    float16x8_t _tmp135c = vsubq_f16(_tmp05, _tmp06);

                    float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f)));
                    float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float16x8_t _out04 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                    vst1q_f16(output0, _out00);
                    vst1q_f16(output0 + 16, _out02);
                    vst1q_f16(output0 + 32, _out04);

                    float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float16x8_t _out05 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp07, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f)));
                    vst1q_f16(output0 + 8, _out01);
                    vst1q_f16(output0 + 24, _out03);
                    vst1q_f16(output0 + 40, _out05);

                    output0 += outw * 8;
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) input transform for 8-element fp16 packed layout.
// Each 6x6 input tile (stride 4, 2-pixel halo) is transformed row-wise into
// tmp[][][] and then column-wise into bottom_blob_tm.
static void conv3x3s1_winograd43_transform_input_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // per-tile scratch: [output row][input row][pack of 8 channels]
        __fp16 tmp[6][6][8];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * 8;

                // pass 1: transform each of the 6 input rows
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _r00 = vld1q_f16(r0);
                    float16x8_t _r01 = vld1q_f16(r0 + 8);
                    float16x8_t _r02 = vld1q_f16(r0 + 16);
                    float16x8_t _r03 = vld1q_f16(r0 + 24);
                    float16x8_t _r04 = vld1q_f16(r0 + 32);
                    float16x8_t _r05 = vld1q_f16(r0 + 40);

                    float16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f);
                    float16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f);
                    float16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f);
                    float16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
                    float16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
                    float16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[2][m], _tmp2m);
                    vst1q_f16(tmp[3][m], _tmp3m);
                    vst1q_f16(tmp[4][m], _tmp4m);
                    vst1q_f16(tmp[5][m], _tmp5m);

                    r0 += w * 8;
                }

                __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 8;
                __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;

                // pass 2: transform the 6 columns of tmp
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);

                    float16x8_t _r0tm0 = vfmsq_n_f16(vfmaq_n_f16(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float16x8_t _r0tm1 = vfmsq_n_f16(vaddq_f16(_tmp04, _tmp03), vaddq_f16(_tmp01, _tmp02), 4.f);
                    float16x8_t _r0tm2 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp03), vsubq_f16(_tmp01, _tmp02), 4.f);
                    float16x8_t _r0tm3 = vfmsq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f);
                    float16x8_t _r0tm4 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f);
                    float16x8_t _r0tm5 = vfmsq_n_f16(vfmaq_n_f16(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f16(r0_tm_0, _r0tm0);
                    vst1q_f16(r0_tm_1, _r0tm1);
                    vst1q_f16(r0_tm_2, _r0tm2);
                    vst1q_f16(r0_tm_3, _r0tm3);
                    vst1q_f16(r0_tm_4, _r0tm4);
                    vst1q_f16(r0_tm_5, _r0tm5);

                    r0_tm_0 += tiles * 48;
                    r0_tm_1 += tiles * 48;
                    r0_tm_2 += tiles * 48;
                    r0_tm_3 += tiles * 48;
                    r0_tm_4 += tiles * 48;
                    r0_tm_5 += tiles * 48;
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) output transform (A^T), 8-element fp16 packed layout.
// Each transformed 6x6 tile is reduced to a 4x4 output patch, bias added.
static void conv3x3s1_winograd43_transform_output_pack8_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const __fp16* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // per-channel-pack bias (zero when no bias tensor is given)
        float16x8_t _bias0 = biasptr ? vld1q_f16(biasptr + p * 8) : vdupq_n_f16(0.f);

        // per-tile scratch: [output row][transformed row][pack of 8]
        __fp16 tmp[4][6][8];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 8;
                const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
                const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
                const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
                const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
                const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;

                __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * 8;

                // pass 1: vertical A^T over the 6 transformed rows
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
                    float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
                    float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
                    float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
                    float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
                    float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);

                    float16x8_t _tmp02a = vaddq_f16(_out0tm1, _out0tm2);
                    float16x8_t _tmp13a = vsubq_f16(_out0tm1, _out0tm2);

                    float16x8_t _tmp02b = vaddq_f16(_out0tm3, _out0tm4);
                    float16x8_t _tmp13b = vsubq_f16(_out0tm3, _out0tm4);

                    float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp02a), _tmp02b);
                    float16x8_t _tmp1m = vfmaq_n_f16(_tmp13a, _tmp13b, 2.f);
                    float16x8_t _tmp2m = vfmaq_n_f16(_tmp02a, _tmp02b, 4.f);
                    float16x8_t _tmp3m = vfmaq_n_f16(vaddq_f16(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[2][m], _tmp2m);
                    vst1q_f16(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 48;
                    output0_tm_1 += tiles * 48;
                    output0_tm_2 += tiles * 48;
                    output0_tm_3 += tiles * 48;
                    output0_tm_4 += tiles * 48;
                    output0_tm_5 += tiles * 48;
                }

                // pass 2: horizontal A^T, add bias, store 4x4 output patch
                for (int m = 0; m < 4; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);

                    float16x8_t _tmp02a = vaddq_f16(_tmp01, _tmp02);
                    float16x8_t _tmp13a = vsubq_f16(_tmp01, _tmp02);

                    float16x8_t _tmp02b = vaddq_f16(_tmp03, _tmp04);
                    float16x8_t _tmp13b = vsubq_f16(_tmp03, _tmp04);

                    float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp02a), _tmp02b));
                    float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp13a, _tmp13b, 2.f));
                    float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp02a, _tmp02b, 4.f));
                    float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vaddq_f16(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1q_f16(output0, _out00);
                    vst1q_f16(output0 + 8, _out01);
                    vst1q_f16(output0 + 16, _out02);
                    vst1q_f16(output0 + 24, _out03);

                    output0 += outw * 8;
                }
            }
        }
    }
}
ellipticStressPartialAxHex3D.c
// Stress-formulation elliptic (Helmholtz-like) operator on hexahedral
// spectral elements: Aq = A(q) for the three vector components stored at
// strides of `offset` in q/Aq.  For each listed element it (1) copies the
// three fields into local arrays, (2) differentiates and forms the weighted
// symmetric-gradient (stress) terms in reference coordinates, and
// (3) applies the transposed derivative (weak divergence) and adds the
// zeroth-order lambda[1] mass term.
// NOTE(review): `S` is accepted but unused in this kernel body.
extern "C" void FUNC(ellipticStressPartialAxHex3D)(const dlong &Nelements,
                                                   const dlong &offset,
                                                   const dlong &loffset,
                                                   const dlong* __restrict__ elementList,
                                                   const dfloat* __restrict__ vgeo,
                                                   const dfloat* __restrict__ D,
                                                   const dfloat* __restrict__ S,
                                                   const dfloat* __restrict__ lambda,
                                                   const dfloat* __restrict__ q,
                                                   dfloat* __restrict__ Aq)
{
  dfloat s_D[p_Nq][p_Nq];        // 1D differentiation matrix, shared by all elements
  dfloat s_U[p_Nq][p_Nq][p_Nq];  // local copies of the three input fields
  dfloat s_V[p_Nq][p_Nq][p_Nq];
  dfloat s_W[p_Nq][p_Nq][p_Nq];

  // weighted stress components in reference (r,s,t) directions per field
  dfloat s_SUr[p_Nq][p_Nq][p_Nq];
  dfloat s_SUs[p_Nq][p_Nq][p_Nq];
  dfloat s_SUt[p_Nq][p_Nq][p_Nq];

  dfloat s_SVr[p_Nq][p_Nq][p_Nq];
  dfloat s_SVs[p_Nq][p_Nq][p_Nq];
  dfloat s_SVt[p_Nq][p_Nq][p_Nq];

  dfloat s_SWr[p_Nq][p_Nq][p_Nq];
  dfloat s_SWs[p_Nq][p_Nq][p_Nq];
  dfloat s_SWt[p_Nq][p_Nq][p_Nq];

  // load the differentiation matrix once, row-major
  for(int j = 0; j < p_Nq; ++j)
    for(int i = 0; i < p_Nq; ++i)
      s_D[j][i] = D[j * p_Nq + i];

#ifdef __NEKRS__OMP__
  // each thread needs private scratch arrays; s_D is read-only and shared
  #pragma omp parallel for private(s_U, s_V, s_W, s_SUr, s_SUs, s_SUt, s_SVr, s_SVs, s_SVt, s_SWr, s_SWs, s_SWt)
#endif
  for(dlong elem = 0; elem < Nelements; ++elem) {
    dlong e = elementList[elem];

    // gather the three vector components for this element
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong id = e * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          s_U[k][j][i] = q[id + 0 * offset];
          s_V[k][j][i] = q[id + 1 * offset];
          s_W[k][j][i] = q[id + 2 * offset];
        }

    // loop over slabs
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong gid = i + j * p_Nq + k * p_Nq * p_Nq + e * p_Np * p_Nvgeo;
          // metric terms (reference-to-physical Jacobian entries) and
          // Jacobian-weighted quadrature weight at this node
          const dfloat rx = vgeo[gid + p_RXID * p_Np];
          const dfloat ry = vgeo[gid + p_RYID * p_Np];
          const dfloat rz = vgeo[gid + p_RZID * p_Np];
          const dfloat sx = vgeo[gid + p_SXID * p_Np];
          const dfloat sy = vgeo[gid + p_SYID * p_Np];
          const dfloat sz = vgeo[gid + p_SZID * p_Np];
          const dfloat tx = vgeo[gid + p_TXID * p_Np];
          const dfloat ty = vgeo[gid + p_TYID * p_Np];
          const dfloat tz = vgeo[gid + p_TZID * p_Np];
          const dfloat JW = vgeo[gid + p_JWID * p_Np];

          // compute 1D derivatives
          dfloat ur = 0.f, us = 0.f, ut = 0.f;
          dfloat vr = 0.f, vs = 0.f, vt = 0.f;
          dfloat wr = 0.f, ws = 0.f, wt = 0.f;
          for(int m = 0; m < p_Nq; ++m) {
            const dfloat Dim = s_D[i][m];     // Dr
            const dfloat Djm = s_D[j][m];     // Ds
            const dfloat Dkm = s_D[k][m];     // Dt
            ur += Dim * s_U[k][j][m];
            us += Djm * s_U[k][m][i];
            ut += Dkm * s_U[m][j][i];
            //
            vr += Dim * s_V[k][j][m];
            vs += Djm * s_V[k][m][i];
            vt += Dkm * s_V[m][j][i];
            //
            wr += Dim * s_W[k][j][m];
            ws += Djm * s_W[k][m][i];
            wt += Dkm * s_W[m][j][i];
          }

          const dlong id = e * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          // lambda[0]-coefficients (one per component at stride loffset)
          const dfloat u_lam0 = lambda[0 * offset + 0 * loffset];
          const dfloat v_lam0 = lambda[0 * offset + 1 * loffset];
          const dfloat w_lam0 = lambda[0 * offset + 2 * loffset];

          // chain rule: physical-space gradients of each component
          const dfloat dudx = rx * ur + sx * us + tx * ut;
          const dfloat dudy = ry * ur + sy * us + ty * ut;
          const dfloat dudz = rz * ur + sz * us + tz * ut;
          const dfloat dvdx = rx * vr + sx * vs + tx * vt;
          const dfloat dvdy = ry * vr + sy * vs + ty * vt;
          const dfloat dvdz = rz * vr + sz * vs + tz * vt;
          const dfloat dwdx = rx * wr + sx * ws + tx * wt;
          const dfloat dwdy = ry * wr + sy * ws + ty * wt;
          const dfloat dwdz = rz * wr + sz * ws + tz * wt;

          // weighted symmetric-gradient (stress) tensor entries
          const dfloat s11 = u_lam0 * JW * (dudx + dudx);
          const dfloat s12 = u_lam0 * JW * (dudy + dvdx);
          const dfloat s13 = u_lam0 * JW * (dudz + dwdx);
          const dfloat s21 = v_lam0 * JW * (dvdx + dudy);
          const dfloat s22 = v_lam0 * JW * (dvdy + dvdy);
          const dfloat s23 = v_lam0 * JW * (dvdz + dwdy);
          const dfloat s31 = w_lam0 * JW * (dwdx + dudz);
          const dfloat s32 = w_lam0 * JW * (dwdy + dvdz);
          const dfloat s33 = w_lam0 * JW * (dwdz + dwdz);

          // project stresses back to reference directions
          s_SUr[k][j][i] = rx * s11 + ry * s12 + rz * s13;
          s_SUs[k][j][i] = sx * s11 + sy * s12 + sz * s13;
          s_SUt[k][j][i] = tx * s11 + ty * s12 + tz * s13;
          //
          s_SVr[k][j][i] = rx * s21 + ry * s22 + rz * s23;
          s_SVs[k][j][i] = sx * s21 + sy * s22 + sz * s23;
          s_SVt[k][j][i] = tx * s21 + ty * s22 + tz * s23;
          //
          s_SWr[k][j][i] = rx * s31 + ry * s32 + rz * s33;
          s_SWs[k][j][i] = sx * s31 + sy * s32 + sz * s33;
          s_SWt[k][j][i] = tx * s31 + ty * s32 + tz * s33;
        }

    // loop over slabs
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          dfloat r_Au = 0.f, r_Av = 0.f, r_Aw = 0.f;

          // transposed differentiation (weak divergence of the stresses)
          for(int m = 0; m < p_Nq; m++) {
            const dfloat Dim = s_D[m][i];     // Dr'
            const dfloat Djm = s_D[m][j];     // Ds'
            const dfloat Dkm = s_D[m][k];     // Dt'
            r_Au += Dim * s_SUr[k][j][m];
            r_Au += Djm * s_SUs[k][m][i];
            r_Au += Dkm * s_SUt[m][j][i];
            r_Av += Dim * s_SVr[k][j][m];
            r_Av += Djm * s_SVs[k][m][i];
            r_Av += Dkm * s_SVt[m][j][i];
            r_Aw += Dim * s_SWr[k][j][m];
            r_Aw += Djm * s_SWs[k][m][i];
            r_Aw += Dkm * s_SWt[m][j][i];
          }

          const dlong id = e * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          // lambda[1]-coefficients for the zeroth-order (mass) term
          const dfloat u_lam1 = lambda[1 * offset + 0 * loffset];
          const dfloat v_lam1 = lambda[1 * offset + 1 * loffset];
          const dfloat w_lam1 = lambda[1 * offset + 2 * loffset];
          const dlong gid = i + j * p_Nq + k * p_Nq * p_Nq + e * p_Np * p_Nvgeo;
          const dfloat JW = vgeo[gid + p_JWID * p_Np];

          Aq[id + 0 * offset] = r_Au + u_lam1 * JW * s_U[k][j][i];
          Aq[id + 1 * offset] = r_Av + v_lam1 * JW * s_V[k][j][i];
          Aq[id + 2 * offset] = r_Aw + w_lam1 * JW * s_W[k][j][i];
        }
  }
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include <LightGBM/json11.hpp> #include "score_updater.hpp" using namespace json11; namespace LightGBM { /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. 
Will insert to the front of current boosting object
  * \param other The boosting object whose trees are prepended to this one
  */
  void MergeFrom(const Boosting* other) override {
    auto other_gbdt = reinterpret_cast<const GBDT*>(other);
    // tmp move to other vector
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    // push model from other first
    for (const auto& tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    // the other model's trees count as "initial" iterations of this model
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // push model in current object
    for (const auto& tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  // Randomly permutes the iterations in [start_iter, end_iter) using a
  // Fisher-Yates shuffle with a fixed seed (17); each iteration's
  // num_tree_per_iteration_ trees are moved together.  end_iter <= 0 means
  // "through the last iteration".
  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    Random tmp_rand(17);
    // shuffle only the requested iteration range
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }

  /*!
* \brief Reset the training data
  * \param train_data New Training data
  * \param objective_function Training objective function
  * \param training_metrics Training metrics
  */
  void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
                         const std::vector<const Metric*>& training_metrics) override;

  /*!
  * \brief Reset Boosting Config
  * \param gbdt_config Config for boosting
  */
  void ResetConfig(const Config* gbdt_config) override;

  /*!
  * \brief Adding a validation dataset
  * \param valid_data Validation dataset
  * \param valid_metrics Metrics for validation dataset
  */
  void AddValidDataset(const Dataset* valid_data,
                       const std::vector<const Metric*>& valid_metrics) override;

  /*!
  * \brief Perform a full training procedure
  * \param snapshot_freq frequence of snapshot
  * \param model_output_path path of model file
  */
  void Train(int snapshot_freq, const std::string& model_output_path) override;

  void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;

  /*!
  * \brief Training logic
  * \param gradients nullptr for using default objective, otherwise use self-defined boosting
  * \param hessians nullptr for using default objective, otherwise use self-defined boosting
  * \return True if cannot train any more
  */
  bool TrainOneIter_new(const score_t* gradients, const score_t* hessians, const score_t* gradients2, const score_t* hessians2) override;
  bool TrainOneIter_old(const score_t* gradients, const score_t* hessians);
  bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;

  /*!
  * \brief Rollback one iteration
  */
  void RollbackOneIter() override;

  /*!
  * \brief Get current iteration (number of stored trees / trees per iteration)
  */
  int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }

  /*!
  * \brief Can use early stopping for prediction or not
  * \return True if cannot use early stopping for prediction
  */
  bool NeedAccuratePrediction() const override {
    // with no objective function, conservatively require accurate prediction
    if (objective_function_ == nullptr) {
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
  * \brief Get evaluation result at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \return evaluation result
  */
  std::vector<double> GetEvalAt(int data_idx) const override;

  /*!
  * \brief Get current training score
  * \param out_len length of returned score
  * \return training score
  */
  const double* GetTrainingScore(int64_t* out_len) override;

  /*!
  * \brief Get size of prediction at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \return The size of prediction
  */
  int64_t GetNumPredictAt(int data_idx) const override {
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      // data_idx 1..N map to valid_score_updater_[0..N-1]
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return num_data * num_class_ * num_labels_;
  }

  /*!
  * \brief Get prediction result at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \param result used to store prediction result, should allocate memory before call this function
  * \param out_len length of returned score
  */
  void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;

  /*!
  * \brief Get number of prediction for one data
  * \param num_iteration number of used iterations
  * \param is_pred_leaf True if predicting leaf index
  * \param is_pred_contrib True if predicting feature contribution
  * \return number of prediction
  */
  inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
    int num_preb_in_one_row = num_class_ * num_labels_;
    if (is_pred_leaf) {
      int max_iteration = GetCurrentIteration();
      // num_iteration <= 0 means "use all iterations"
      if (num_iteration > 0) {
        num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
      } else {
        num_preb_in_one_row *= max_iteration;
      }
    } else if (is_pred_contrib) {
      num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
    }
    return num_preb_in_one_row;
  }

  void PredictRaw(const double* features, double* output,
                  const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
                       const PredictionEarlyStopInstance* early_stop) const override;

  void Predict(const double* features, double* output,
               const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictByMap(const std::unordered_map<int, double>& features, double* output,
                    const PredictionEarlyStopInstance* early_stop) const override;

  void PredictLeafIndex(const double* features, double* output) const override;

  void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;

  void PredictContrib(const double* features, double* output,
                      const PredictionEarlyStopInstance* earlyStop) const override;

  /*!
  * \brief Dump model to json format string
  * \param start_iteration The model will be saved start from
  * \param num_iteration Number of iterations that want to dump, -1 means dump all
  * \return Json format string of model
  */
  std::string DumpModel(int start_iteration, int num_iteration) const override;

  /*!
* \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override; bool SaveModelToFile(int start_iteration, int num_iterations, int num_labels, int num_label, const char* filename) override; bool SetNumlabels(int num_labels) override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! * \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! 
* \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! * \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_); } if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! 
* \brief Get Type name of this boosting object */ const char* SubModelName() const override { return "tree"; } protected: /*! * \brief Print eval result and check early stopping */ virtual bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! * \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); /*! * \brief Helper function for bagging, used for multi-threading optimization * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief Helper function for bagging, used for multi-threading optimization, balanced sampling * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BalancedBaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current interation * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! 
\brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction* objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric*> training_metrics_;
  /*! \brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric*>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Only use first metric for early stopping */
  bool es_first_metric_only_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data*/
  int max_feature_idx_;
  /*! \brief First order derivative of training data */
  std::vector<score_t> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t> hessians_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Temporary buffer for the indices of in-bag data */
  std::vector<data_size_t> tmp_indices_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iterations */
  int num_tree_per_iteration_;
  /*! \brief Number of class */
  int num_class_;
  /*! \brief Number of labels */
  int num_labels_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*!
\brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! \brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; /*! \brief number of threads */ int num_threads_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> offsets_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_write_pos_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_write_pos_buf_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; Json forced_splits_json_; int save_num_label = -1; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
GB_binop__isne_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isne_fc64 // A.*B function (eWiseMult): GB_AemultB__isne_fc64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__isne_fc64 // C+=b function (dense accum): GB_Cdense_accumb__isne_fc64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_fc64 // C=scalar+B GB_bind1st__isne_fc64 // C=scalar+B' GB_bind1st_tran__isne_fc64 // C=A+scalar GB_bind2nd__isne_fc64 // C=A'+scalar GB_bind2nd_tran__isne_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_isne (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC64_isne (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_FC64 || GxB_NO_ISNE_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isne_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isne_fc64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isne_fc64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__isne_fc64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, 
*kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isne_fc64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isne_fc64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_FC64_isne (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): 
apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isne_fc64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_FC64_isne (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_isne (x, aij) ; \ } GrB_Info GB_bind1st_tran__isne_fc64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_isne (aij, y) ; \ } GrB_Info GB_bind2nd_tran__isne_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
apply-kernels.h
#ifndef __ARRAY_APPLY_KERNELS_H__ #define __ARRAY_APPLY_KERNELS_H__ #include <type_traits> #include <omp.h> #include "../../../macros/macros.h" #include "../../../meta/meta.h" #include "../../../types/types.h" namespace __core__ { namespace __functional__ { namespace __apply__ { namespace __array__ { namespace __private__ { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunknown-pragmas" template <typename fn_T,int threadnum,typename T,typename IT,typename... Args> __optimize__ void __apply_function_ckernel__(T* arr,IT size,Args... args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) arr[i]=fn_T::fn(arr[i],args...); } else { for(IT i=0;i<size;++i) arr[i]=fn_T::fn(arr[i],args...); } } template <typename fn_T,int threadnum,typename T,typename U,typename IT,typename... Args> __optimize__ void __apply_function_ckernel__(T* arr_dst,U* arr_src,IT size,Args... args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(arr_src[i],args...); } else { for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(arr_src[i],args...); } } template <typename fn_T,int threadnum,typename T,typename V,typename U,typename IT,typename... Args> __optimize__ void __apply_function_ckernel__(T* arr_dst,V* arr1,U* arr2,IT size,Args... args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(arr1[i],arr2[i],args...); } else { for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(arr1[i],arr2[i],args...); } } template <typename fn_T,int threadnum,typename T,typename V,typename U,typename W,typename IT,typename... Args> __optimize__ void __apply_function_ckernel__(T* arr_dst,V* arr1,U* arr2,W* arr3,IT size,Args... 
args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(arr1[i],arr2[i],arr3[i],args...); } else { for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(arr1[i],arr2[i],arr3[i],args...); } } template <typename fn_T,int threadnum,typename T,typename IT,typename... Args> __optimize__ void __apply_function_indexed_ckernel__(T* arr,IT size,Args... args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) arr[i]=fn_T::fn(i,arr[i],args...); } else { for(IT i=0;i<size;++i) arr[i]=fn_T::fn(i,arr[i],args...); } } template <typename fn_T,int threadnum,typename T,typename U,typename IT,typename... Args> __optimize__ void __apply_function_indexed_ckernel__(T* arr_dst,U* arr_src,IT size,Args... args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(i,arr_src[i],args...); } else { for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(i,arr_src[i],args...); } } template <typename fn_T,int threadnum,typename T,typename V,typename U,typename IT,typename... Args> __optimize__ void __apply_function_indexed_ckernel__(T* arr_dst,V* arr1,U* arr2,IT size,Args... args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(i,arr1[i],arr2[i],args...); } else { for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(i,arr1[i],arr2[i],args...); } } template <typename fn_T,int threadnum,typename T,typename V,typename U,typename W,typename IT,typename... Args> __optimize__ void __apply_function_indexed_ckernel__(T* arr_dst,V* arr1,U* arr2,W* arr3,IT size,Args... args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(i,arr1[i],arr2[i],arr3[i],args...); } else { for(IT i=0;i<size;++i) arr_dst[i]=fn_T::fn(i,arr1[i],arr2[i],arr3[i],args...); } } template <typename fn_T,int threadnum,typename IT,typename... 
Args> __optimize__ void __apply_function_meta_ckernel__(IT size,Args... args) { if(threadnum>1) { #pragma omp parallel for num_threads(threadnum) for(IT i=0;i<size;++i) fn_T::fn(i,size,args...); } else { for(IT i=0;i<size;++i) fn_T::fn(i,size,args...); } } template <typename fn_T,int threadnum,typename IT,typename... Args> __optimize__ void __apply_function_meta_ckernel__(IT size,IT private_mem_size,Args... args) { if(threadnum>1) { #pragma omp parallel num_threads(threadnum) { uchar *private_mem=malloc(private_mem_size); #pragma omp for for(IT i=0;i<size;++i) fn_T::fn(i,size,private_mem,args...); free(private_mem); } } else { uchar *private_mem=malloc(private_mem_size); for(IT i=0;i<size;++i) fn_T::fn(i,size,private_mem,args...); free(private_mem); } } #pragma GCC diagnostic pop } } } } } #endif
bin_class_metric.h
/** * Copyright (c) 2015 by Contributors */ #ifndef ZDIFACTO_LOSS_BIN_CLASS_METRIC_H_ #define ZDIFACTO_LOSS_BIN_CLASS_METRIC_H_ #include <algorithm> #include <vector> #include <math.h> #include "zdifacto/base.h" #include "dmlc/logging.h" #include "dmlc/omp.h" #include "zdifacto/sarray.h" namespace zdifacto { class BinClassMetric { public: /** * \brief constructor * @param label label vector * @param predict predict vector * @param n length * @param nthreads num threads */ BinClassMetric(const real_t* const label, const real_t* const predict, size_t n, int nthreads = DEFAULT_NTHREADS) : label_(label), predict_(predict), size_(n), nt_(nthreads) { } ~BinClassMetric() { } real_t AUC() { size_t n = size_; struct Entry { real_t label; real_t predict; }; std::vector<Entry> buff(n); for (size_t i = 0; i < n; ++i) { buff[i].label = label_[i]; buff[i].predict = predict_[i]; } std::sort(buff.data(), buff.data()+n, [](const Entry& a, const Entry&b) { return a.predict < b.predict; }); real_t area = 0, cum_tp = 0; for (size_t i = 0; i < n; ++i) { if (buff[i].label > 0) { cum_tp += 1; } else { area += cum_tp; } } if (cum_tp == 0 || cum_tp == n) return 1; area /= cum_tp * (n - cum_tp); return (area < 0.5 ? 1 - area : area) * n; } real_t Accuracy(real_t threshold) { real_t correct = 0; size_t n = size_; #pragma omp parallel for reduction(+:correct) num_threads(nt_) for (size_t i = 0; i < n; ++i) { if ((label_[i] > 0 && predict_[i] > threshold) || (label_[i] <= 0 && predict_[i] <= threshold)) correct += 1; } return correct > 0.5 * n ? correct : n - correct; } real_t LogLoss() { real_t loss = 0; size_t n = size_; #pragma omp parallel for reduction(+:loss) num_threads(nt_) for (size_t i = 0; i < n; ++i) { real_t y = label_[i] > 0; real_t p = 1 / (1 + exp(- predict_[i])); p = p < 1e-10 ? 
1e-10 : p; loss += y * log(p) + (1 - y) * log(1 - p); } return - loss; } real_t LogitObjv() { real_t objv = 0; #pragma omp parallel for reduction(+:objv) num_threads(nt_) for (size_t i = 0; i < size_; ++i) { real_t y = label_[i] > 0 ? 1 : -1; objv += log(1 + exp(- y * predict_[i])); } return objv; } private: real_t const* label_; real_t const* predict_; size_t size_; int nt_; }; } #endif
csr_matop.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * Matrix operation functions for hypre_CSRMatrix class.
 *
 *****************************************************************************/

#include <assert.h>
#include "seq_mv.h"
#include "csr_matrix.h"

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixAdd:
 * adds two CSR Matrices A and B and returns a CSR Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 * through cancellation of elements in A and B or already contained in A and B.
 * To remove those, use hypre_CSRMatrixDeleteZeros
 *
 * Two-pass algorithm: pass 1 counts the nonzeros of each row of C = A + B
 * using a column marker; pass 2 fills C's column indices and values, summing
 * entries where A and B share a column. Returns NULL (with an error message)
 * on incompatible dimensions.
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix *
hypre_CSRMatrixAdd( hypre_CSRMatrix *A,
                    hypre_CSRMatrix *B )
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_Int        *B_j      = hypre_CSRMatrixJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_Int        *C_j;

   HYPRE_Int         ia, ib, ic, jcol, num_nonzeros;
   HYPRE_Int         pos;
   HYPRE_Int        *marker;

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* marker[j] records the last row in which column j was seen, so shared
      columns of A and B are counted (pass 1) / merged (pass 2) only once */
   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* pass 1: count nonzeros per row of C and build the row-pointer array */
   num_nonzeros = 0;
   C_i[0] = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         /* only count columns of B not already seen in A for this row */
         if (marker[jcol] != ic)
         {
            marker[jcol] = ic;
            num_nonzeros++;
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixInitialize(C);
   C_j = hypre_CSRMatrixJ(C);
   C_data = hypre_CSRMatrixData(C);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* pass 2: fill C; here marker[j] holds the position in C of column j for
      the current row, so duplicates from B are accumulated in place */
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         /* marker entries < C_i[ic] belong to earlier rows: new column */
         if (marker[jcol] < C_i[ic])
         {
            C_j[pos] = jcol;
            C_data[pos] = B_data[ib];
            marker[jcol] = pos;
            pos++;
         }
         else
         {
            C_data[marker[jcol]] += B_data[ib];
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);
   return C;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixBigAdd:
 * adds two CSR Matrices A and B and returns a CSR Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 * through cancellation of elements in A and B or already contained in A and B.
 * To remove those, use hypre_CSRMatrixDeleteZeros
 *
 * Identical to hypre_CSRMatrixAdd above, except that the column indices are
 * HYPRE_BigInt (64-bit "big J" arrays) and C is big-initialized.
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix *
hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A,
                       hypre_CSRMatrix *B )
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_BigInt     *A_j      = hypre_CSRMatrixBigJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_BigInt     *B_j      = hypre_CSRMatrixBigJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_BigInt     *C_j;

   HYPRE_Int         ia, ib, ic, num_nonzeros;
   HYPRE_BigInt      jcol;
   HYPRE_Int         pos;
   HYPRE_Int        *marker;

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* same marker scheme as hypre_CSRMatrixAdd; see comments there */
   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* pass 1: count nonzeros per row of C */
   num_nonzeros = 0;
   C_i[0] = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         if (marker[jcol] != ic)
         {
            marker[jcol] = ic;
            num_nonzeros++;
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixBigInitialize(C);
   C_j = hypre_CSRMatrixBigJ(C);
   C_data = hypre_CSRMatrixData(C);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* pass 2: fill C, accumulating entries where A and B share a column */
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         if (marker[jcol] < C_i[ic])
         {
            C_j[pos] = jcol;
            C_data[pos] = B_data[ib];
            marker[jcol] = pos;
            pos++;
         }
         else
         {
            C_data[marker[jcol]] += B_data[ib];
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);
   return C;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixMultiply
 * multiplies two CSR Matrices A and B and returns a CSR Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 * through cancellation of elements in A and B or already contained in A and B.
To remove those, use hypre_CSRMatrixDeleteZeros
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix *
hypre_CSRMatrixMultiply( hypre_CSRMatrix *A,
                         hypre_CSRMatrix *B)
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_Int        *B_j      = hypre_CSRMatrixJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_Int        *C_j;
   HYPRE_Int         ia, ib, ic, ja, jb, num_nonzeros=0;
   HYPRE_Int         row_start, counter;
   HYPRE_Complex     a_entry, b_entry;
   /* allsquare: when C is square, the diagonal entry of every row is
    * reserved unconditionally (even if it would be numerically zero). */
   HYPRE_Int         allsquare = 0;
   HYPRE_Int         max_num_threads;
   HYPRE_Int        *jj_count;   /* per-thread nonzero counts for prefix sum */

   if (ncols_A != nrows_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   if (nrows_A == ncols_B) allsquare = 1;

   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   max_num_threads = hypre_NumThreads();
   jj_count = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, row_start, counter, a_entry, b_entry)
#endif
   {
      HYPRE_Int *B_marker = NULL;
      HYPRE_Int  ns, ne, ii, jj;
      HYPRE_Int  size, rest, num_threads;
      HYPRE_Int  i1;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      /* Split rows [0, nrows_A) into contiguous chunks [ns, ne);
       * the first 'rest' threads take one extra row. */
      size = nrows_A/num_threads;
      rest = nrows_A - size*num_threads;
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      /* B_marker[j] = last row of C in which column j was counted
       * (symbolic pass) / position of column j in C (numeric pass). */
      B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST);
      for (ib = 0; ib < ncols_B; ib++)
         B_marker[ib] = -1;

      /* Symbolic pass over this thread's rows: C_i gets per-thread-local
       * offsets for now; they are shifted to global offsets below. */
      num_nonzeros = 0;
      for (ic = ns; ic < ne; ic++)
      {
         C_i[ic] = num_nonzeros;
         if (allsquare)
         {
            B_marker[ic] = ic;
            num_nonzeros++;
         }
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            ja = A_j[ia];
            for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
            {
               jb = B_j[ib];
               if (B_marker[jb] != ic)
               {
                  B_marker[jb] = ic;
                  num_nonzeros++;
               }
            }
         }
      }
      jj_count[ii] = num_nonzeros;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii)
      {
         /* Shift this thread's row offsets by the nonzero counts of all
          * lower-numbered threads. */
         jj = jj_count[0];
         for (i1 = 1; i1 < ii; i1++)
            jj += jj_count[i1];
         for (i1 = ns; i1 < ne; i1++)
            C_i[i1] += jj;
      }
      else
      {
         /* Thread 0 alone finalizes the total count and creates C; the
          * other threads wait at the barrier below before using C_j/C_data. */
         C_i[nrows_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
            C_i[nrows_A] += jj_count[i1];

         C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]);
         hypre_CSRMatrixI(C) = C_i;
         hypre_CSRMatrixInitialize(C);
         C_j = hypre_CSRMatrixJ(C);
         C_data = hypre_CSRMatrixData(C);
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Numeric pass: fill this thread's slice of C. */
      for (ib = 0; ib < ncols_B; ib++)
         B_marker[ib] = -1;
      counter = C_i[ns];
      for (ic = ns; ic < ne; ic++)
      {
         row_start = C_i[ic];
         if (allsquare)
         {
            B_marker[ic] = counter;
            C_data[counter] = 0;
            C_j[counter] = ic;
            counter++;
         }
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            ja = A_j[ia];
            a_entry = A_data[ia];
            for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
            {
               jb = B_j[ib];
               b_entry = B_data[ib];
               /* marker below row_start => column not yet in this row */
               if (B_marker[jb] < row_start)
               {
                  B_marker[jb] = counter;
                  C_j[B_marker[jb]] = jb;
                  C_data[B_marker[jb]] = a_entry*b_entry;
                  counter++;
               }
               else
                  C_data[B_marker[jb]] += a_entry*b_entry;
            }
         }
      }
      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
   } /*end parallel region */

   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   return C;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixDeleteZeros:
 * returns a new matrix with all entries of magnitude <= tol removed,
 * or NULL when A contains no such entries (caller keeps using A).
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix *
hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A, HYPRE_Real tol)
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Int         num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
   hypre_CSRMatrix  *B;
   HYPRE_Complex    *B_data;
   HYPRE_Int        *B_i;
   HYPRE_Int        *B_j;
   HYPRE_Int         zeros;
   HYPRE_Int         i, j;
   HYPRE_Int         pos_A, pos_B;

   /* First count the droppable entries to size B exactly. */
   zeros = 0;
   for (i=0; i < num_nonzeros; i++)
      if (hypre_cabs(A_data[i]) <= tol)
         zeros++;
   if (zeros)
   {
      B = hypre_CSRMatrixCreate(nrows_A,ncols_A,num_nonzeros-zeros);
      hypre_CSRMatrixInitialize(B);
      B_i = hypre_CSRMatrixI(B);
      B_j = hypre_CSRMatrixJ(B);
      B_data = hypre_CSRMatrixData(B);
      B_i[0] = 0;
      pos_A = 0;
      pos_B = 0;
      /* Compact copy: skip small entries, keep the rest in order. */
      for (i=0; i < nrows_A; i++)
      {
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            if (hypre_cabs(A_data[j]) <= tol)
            {
               pos_A++;
            }
            else
            {
               B_data[pos_B] = A_data[pos_A];
               B_j[pos_B] = A_j[pos_A];
               pos_B++;
               pos_A++;
            }
         }
         B_i[i+1] = pos_B;
      }
      return B;
   }
   else
      return NULL;
}

/******************************************************************************
 *
 * Finds transpose of a hypre_CSRMatrix
 *
 *****************************************************************************/

/**
 * idx = idx2*dim1 + idx1
 * -> ret = idx1*dim2 + idx2
 *        = (idx%dim1)*dim2 + idx/dim1
 */
static inline HYPRE_Int transpose_idx(HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2)
{
   return idx%dim1*dim2 + idx/dim1;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixTranspose
 * Computes AT = transpose(A) via a thread-parallel counting sort on the
 * column indices; when 'data' is 0 only the pattern is transposed.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_CSRMatrixTranspose(hypre_CSRMatrix  *A,
                                   hypre_CSRMatrix **AT,
                                   HYPRE_Int         data)
{
   HYPRE_Complex      *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int          *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int          *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int           num_rowsA = hypre_CSRMatrixNumRows(A);
   HYPRE_Int           num_colsA = hypre_CSRMatrixNumCols(A);
   HYPRE_Int           num_nonzerosA = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex      *AT_data;
   /*HYPRE_Int          *AT_i;*/
   HYPRE_Int          *AT_j;
   HYPRE_Int           num_rowsAT;
   HYPRE_Int           num_colsAT;
   HYPRE_Int           num_nonzerosAT;
   HYPRE_Int           max_col;
   HYPRE_Int           i, j;

   /*--------------------------------------------------------------
    * First, ascertain that num_cols and num_nonzeros has been set.
    * If not, set them.
    *--------------------------------------------------------------*/

   if (! num_nonzerosA)
   {
      num_nonzerosA = A_i[num_rowsA];
   }
   if (num_rowsA && num_nonzerosA && ! num_colsA)
   {
      /* Derive the column count by scanning for the largest index. */
      max_col = -1;
      for (i = 0; i < num_rowsA; ++i)
      {
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            if (A_j[j] > max_col)
               max_col = A_j[j];
         }
      }
      num_colsA = max_col+1;
   }

   num_rowsAT = num_colsA;
   num_colsAT = num_rowsA;
   num_nonzerosAT = num_nonzerosA;

   *AT = hypre_CSRMatrixCreate(num_rowsAT, num_colsAT, num_nonzerosAT);

   if (0 == num_colsA)
   {
      // JSP: parallel counting sorting breaks down
      // when A has no columns
      hypre_CSRMatrixInitialize(*AT);
      return 0;
   }

   AT_j = hypre_CTAlloc(HYPRE_Int, num_nonzerosAT, HYPRE_MEMORY_SHARED);
   hypre_CSRMatrixJ(*AT) = AT_j;
   if (data)
   {
      AT_data = hypre_CTAlloc(HYPRE_Complex, num_nonzerosAT, HYPRE_MEMORY_SHARED);
      hypre_CSRMatrixData(*AT) = AT_data;
   }

   /*-----------------------------------------------------------------
    * Parallel count sort
    *-----------------------------------------------------------------*/
   HYPRE_Int *bucket = hypre_TAlloc(
      HYPRE_Int, (num_colsA + 1)*hypre_NumThreads(), HYPRE_MEMORY_SHARED);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      /* Each thread handles the row range chosen by the load-balanced
       * partition of A. */
      HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
      HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
      hypre_assert(iBegin <= iEnd);
      hypre_assert(iBegin >= 0 && iBegin <= num_rowsA);
      hypre_assert(iEnd >= 0 && iEnd <= num_rowsA);

      HYPRE_Int i, j;

      memset(bucket + my_thread_num*num_colsA, 0, sizeof(HYPRE_Int)*num_colsA);

      /*-----------------------------------------------------------------
       * Count the number of entries that will go into each bucket
       * bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array
       *-----------------------------------------------------------------*/
      for (j = A_i[iBegin]; j < A_i[iEnd]; ++j)
      {
         HYPRE_Int idx = A_j[j];
         bucket[my_thread_num*num_colsA + idx]++;
      }

      /*-----------------------------------------------------------------
       * Parallel prefix sum of bucket with length num_colsA * num_threads
       * accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads]
       *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      for (i = my_thread_num*num_colsA + 1; i < (my_thread_num + 1)*num_colsA; ++i)
      {
         HYPRE_Int transpose_i = transpose_idx(i, num_threads, num_colsA);
         HYPRE_Int transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_colsA);

         bucket[transpose_i] += bucket[transpose_i_minus_1];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#pragma omp master
#endif
      {
         /* Serial carry step across the per-thread partial sums. */
         for (i = 1; i < num_threads; ++i)
         {
            HYPRE_Int j0 = num_colsA*i - 1, j1 = num_colsA*(i + 1) - 1;
            HYPRE_Int transpose_j0 = transpose_idx(j0, num_threads, num_colsA);
            HYPRE_Int transpose_j1 = transpose_idx(j1, num_threads, num_colsA);

            bucket[transpose_j1] += bucket[transpose_j0];
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (my_thread_num > 0)
      {
         /* Propagate the carry into this thread's prefix sums. */
         HYPRE_Int transpose_i0 = transpose_idx(num_colsA*my_thread_num - 1,
                                                num_threads, num_colsA);
         HYPRE_Int offset = bucket[transpose_i0];

         for (i = my_thread_num*num_colsA; i < (my_thread_num + 1)*num_colsA - 1; ++i)
         {
            HYPRE_Int transpose_i = transpose_idx(i, num_threads, num_colsA);

            bucket[transpose_i] += offset;
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /*----------------------------------------------------------------
       * Load the data and column numbers of AT
       * (backward iteration so that, after the decrements, bucket ends
       * up holding the row-start offsets, i.e. AT's row pointer)
       *----------------------------------------------------------------*/

      if (data)
      {
         for (i = iEnd - 1; i >= iBegin; --i)
         {
            for (j = A_i[i + 1] - 1; j >= A_i[i]; --j)
            {
               HYPRE_Int idx = A_j[j];
               --bucket[my_thread_num*num_colsA + idx];

               HYPRE_Int offset = bucket[my_thread_num*num_colsA + idx];

               AT_data[offset] = A_data[j];
               AT_j[offset] = i;
            }
         }
      }
      else
      {
         for (i = iEnd - 1; i >= iBegin; --i)
         {
            for (j = A_i[i + 1] - 1; j >= A_i[i]; --j)
            {
               HYPRE_Int idx = A_j[j];
               --bucket[my_thread_num*num_colsA + idx];

               HYPRE_Int offset = bucket[my_thread_num*num_colsA + idx];

               AT_j[offset] = i;
            }
         }
      }

   } /*end parallel region */

   hypre_CSRMatrixI(*AT) = bucket;
   // JSP: bucket is hypre_NumThreads() times longer than
   // the size needed for AT_i, but this should be OK.
   // If the memory size is a concern, we can allocate
   // a new memory for AT_i and copy from bucket.
   hypre_CSRMatrixI(*AT)[num_colsA] = num_nonzerosA;

   return(0);
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixReorder:
 * Reorders the column and data arrays of a square CSR matrix, such that the
 * first entry in each row is the diagonal one.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_CSRMatrixReorder(hypre_CSRMatrix *A)
{
   HYPRE_Int      i, j, tempi, row_size;
   HYPRE_Complex  tempd;

   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int      num_rowsA = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      num_colsA = hypre_CSRMatrixNumCols(A);

   /* the matrix should be square */
   if (num_rowsA != num_colsA)
      return -1;

   for (i = 0; i < num_rowsA; i++)
   {
      row_size = A_i[i+1]-A_i[i];
      /* A_j/A_data are advanced past each processed row, so indices here
       * are relative to the current row. */
      for (j = 0; j < row_size; j++)
      {
         if (A_j[j] == i)
         {
            if (j != 0)
            {
               /* Swap the diagonal entry into the leading position. */
               tempi = A_j[0];
               A_j[0] = A_j[j];
               A_j[j] = tempi;

               tempd = A_data[0];
               A_data[0] = A_data[j];
               A_data[j] = tempd;
            }
            break;
         }

         /* diagonal element is missing */
         if (j == row_size-1)
            return -2;
      }
      A_j += row_size;
      A_data += row_size;
   }

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixSumElts:
 * Returns the sum of all matrix elements.
 *--------------------------------------------------------------------------*/

HYPRE_Complex hypre_CSRMatrixSumElts( hypre_CSRMatrix *A )
{
   HYPRE_Complex  sum = 0;
   HYPRE_Complex *data = hypre_CSRMatrixData( A );
   HYPRE_Int      num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int      i;

   for ( i=0; i<num_nonzeros; ++i ) sum += data[i];

   return sum;
}
Par-12-SeqForParForNestedParFor.c
/* Benchmark-style driver (filename suggests a data-race test case:
 * sequential-for followed by a parallel-for with a nested parallel-for). */
int main(int argc, char **argv)
{
   int a[4] = {1,2,3,4};
   int b[4] = {1,1,1,1};

   /* NOTE(review): on the first iteration i == 0, so 'i < 2' is true and
    * main returns -1 immediately — everything below this loop is dead
    * code as written.  Presumably intentional for the test harness;
    * confirm against the suite's expected behavior. */
   for (int i = 0; i < 1; ++i) {
      if (i < 2) {
         return -1;
      }
   }

#pragma omp parallel for
   for (int i = 0; i < 4; ++i) {
      a[i] = 3*a[i];
      /* Nested parallel region: if this code were reached and nested
       * parallelism enabled, distinct outer iterations would update the
       * shared b[j] concurrently without synchronization — a data race
       * (likely the scenario this test case is meant to exhibit). */
#pragma omp parallel for
      for(int j = 0; j < 4; ++j) {
         b[j] += a[i];
      }
   }

   return 0;
}
par_mgr.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Two-grid system solver
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"

#ifdef HYPRE_USING_DSUPERLU
#include "dsuperlu.h"
#endif

#if defined(HYPRE_USING_CUDA)
/* Print a diagnostic for an MGR option with no GPU implementation and
 * abort the whole MPI job. */
void hypre_NoGPUSupport(char *option)
{
   char msg[256];
   hypre_sprintf(msg, "Error: Chosen %s option is not currently supported on GPU\n\n", option);
   hypre_printf("%s ", msg);
   // hypre_error_w_msg(1, msg);
   hypre_MPI_Abort(hypre_MPI_COMM_WORLD, -1);
}
#endif

/* Create */
/* Allocates an MGR data object and fills in every field's default;
 * returned as void* per the hypre solver-handle convention. */
void *
hypre_MGRCreate()
{
   hypre_ParMGRData  *mgr_data;

   mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);

   /* block data */
   (mgr_data -> block_size) = 1;
   (mgr_data -> block_num_coarse_indexes) = NULL;
   (mgr_data -> point_marker_array) = NULL;
   (mgr_data -> block_cf_marker) = NULL;

   /* general data */
   (mgr_data -> max_num_coarse_levels) = 10;
   (mgr_data -> A_array) = NULL;
#if defined(HYPRE_USING_CUDA)
   (mgr_data -> P_FF_array) = NULL;
#endif
   (mgr_data -> P_array) = NULL;
   (mgr_data -> RT_array) = NULL;
   (mgr_data -> RAP) = NULL;
   (mgr_data -> CF_marker_array) = NULL;
   (mgr_data -> coarse_indices_lvls) = NULL;

   /* F-relaxation solver data (per-level A_ff systems) */
   (mgr_data -> A_ff_array) = NULL;
   (mgr_data -> F_fine_array) = NULL;
   (mgr_data -> U_fine_array) = NULL;
   (mgr_data -> aff_solver) = NULL;
   (mgr_data -> fine_grid_solver_setup) = NULL;
   (mgr_data -> fine_grid_solver_solve) = NULL;

   (mgr_data -> F_array) = NULL;
   (mgr_data -> U_array) = NULL;
   (mgr_data -> residual) = NULL;
   (mgr_data -> rel_res_norms) = NULL;

   /* work vectors for the solve phase */
   (mgr_data -> Vtemp) = NULL;
   (mgr_data -> Ztemp) = NULL;
   (mgr_data -> Utemp) = NULL;
   (mgr_data -> Ftemp) = NULL;

   (mgr_data -> num_iterations) = 0;
   (mgr_data -> num_interp_sweeps) = 1;
   (mgr_data -> num_restrict_sweeps) = 1;
   (mgr_data -> trunc_factor) = 0.0;
   (mgr_data -> max_row_sum) = 0.9;
   (mgr_data -> strong_threshold) = 0.25;
   (mgr_data -> P_max_elmts) = 0;

   /* coarse grid solver (BoomerAMG by default; see use_default_cgrid_solver) */
   (mgr_data -> coarse_grid_solver) = NULL;
   (mgr_data -> coarse_grid_solver_setup) = NULL;
   (mgr_data -> coarse_grid_solver_solve) = NULL;

   (mgr_data -> global_smoother) = NULL;

   (mgr_data -> use_default_cgrid_solver) = 1;
   (mgr_data -> fsolver_mode) = -1; // set to -1 to avoid printing when not used
   (mgr_data -> omega) = 1.;
   (mgr_data -> max_iter) = 20;
   (mgr_data -> tol) = 1.0e-6;
   (mgr_data -> relax_type) = 0;
   (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms.
   (mgr_data -> interp_type) = NULL;
   (mgr_data -> restrict_type) = NULL;
   (mgr_data -> num_relax_sweeps) = 1;
   (mgr_data -> relax_weight) = 1.0;

   (mgr_data -> logging) = 0;
   (mgr_data -> print_level) = 0;
   (mgr_data -> frelax_print_level) = 0;
   (mgr_data -> cg_print_level) = 0;

   (mgr_data -> l1_norms) = NULL;

   /* reserved C-points kept in the coarse grid across all levels */
   (mgr_data -> reserved_coarse_size) = 0;
   (mgr_data -> reserved_coarse_indexes) = NULL;
   (mgr_data -> reserved_Cpoint_local_indexes) = NULL;

   (mgr_data -> diaginv) = NULL;
   (mgr_data -> global_smooth_iters) = 1;
   (mgr_data -> global_smooth_type) = 0;

   (mgr_data -> set_non_Cpoints_to_F) = 0;
   (mgr_data -> idx_array) = NULL;

   /* V-cycle F-relaxation data */
   (mgr_data -> Frelax_method) = NULL;
   (mgr_data -> VcycleRelaxVtemp) = NULL;
   (mgr_data -> VcycleRelaxZtemp) = NULL;
   (mgr_data -> FrelaxVcycleData) = NULL;
   (mgr_data -> Frelax_num_functions) = NULL;
   (mgr_data -> max_local_lvls) = 10;

   (mgr_data -> use_non_galerkin_cg) = NULL;

   (mgr_data -> print_coarse_system) = 0;
   (mgr_data -> set_c_points_method) = 0;
   (mgr_data -> lvl_to_keep_cpoints) = 0;
   (mgr_data -> cg_convergence_factor) = 0.0;

   (mgr_data -> truncate_coarse_grid_threshold) = 0.0;

   return (void *) mgr_data;
}
/*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if (mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if ((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if ((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms), HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if ((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if ((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if ((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if ((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if ((mgr_data -> use_default_cgrid_solver)) { if ((mgr_data -> coarse_grid_solver)) { hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); } (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i = 0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* 
coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i = 0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) { hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); } hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if (mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i = 1; i < num_coarse_levels + 1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i - 1]) { hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i - 1]); } if ((mgr_data -> RT_array)[i - 1]) { hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i - 1]); } hypre_IntArrayDestroy(mgr_data -> CF_marker_array[i - 1]); } for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } } #if defined(HYPRE_USING_CUDA) if (mgr_data -> P_FF_array) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> P_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> P_FF_array)[i]); } } //hypre_TFree(P_FF_array, hypre_HandleMemoryLocation(hypre_handle())); hypre_TFree((mgr_data -> P_FF_array), HYPRE_MEMORY_HOST); } #endif /* AMG for Frelax */ if (mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i = 1; i < num_coarse_levels + 1; i++) { if (mgr_data -> F_fine_array[i]) { hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); } if (mgr_data -> U_fine_array[i]) { hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } } for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } } if (mgr_data -> fsolver_mode > 0) { if ((mgr_data -> A_ff_array)[0]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = 
NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if (mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) { hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } } if (mgr_data -> fsolver_mode == 2) { if ((mgr_data -> aff_solver)[0]) { hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if ((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if ((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if ((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if ((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if ((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if ((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if ((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, 
HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } /* data for V-cycle F-relaxation */ if ((mgr_data -> VcycleRelaxVtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) ); (mgr_data -> VcycleRelaxVtemp) = NULL; } if ((mgr_data -> VcycleRelaxZtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) ); (mgr_data -> VcycleRelaxZtemp) = NULL; } if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]); (mgr_data -> FrelaxVcycleData)[i] = NULL; } } hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST); (mgr_data -> FrelaxVcycleData) = NULL; } /* data for reserved coarse nodes */ if (mgr_data -> reserved_coarse_indexes) { hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* index array for setting Cpoints by global block */ if ((mgr_data -> set_c_points_method) == 1) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } /* array for setting option to use non-Galerkin coarse grid */ if (mgr_data -> use_non_galerkin_cg) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } /* coarse level matrix - RAP */ if ((mgr_data -> RAP)) { hypre_ParCSRMatrixDestroy((mgr_data -> RAP)); } if ((mgr_data -> diaginv)) { hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST); } if ((mgr_data -> global_smoother)) { if (mgr_data -> global_smooth_type == 8) { HYPRE_EuclidDestroy((mgr_data -> global_smoother)); } else if (mgr_data -> global_smooth_type == 16) { HYPRE_ILUDestroy((mgr_data -> global_smoother)); } } /* mgr data */ hypre_TFree(mgr_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Create data for V-cycle F-relaxtion */ void * hypre_MGRCreateFrelaxVcycleData() { hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST); 
hypre_ParAMGDataAArray(vdata) = NULL; hypre_ParAMGDataPArray(vdata) = NULL; hypre_ParAMGDataFArray(vdata) = NULL; hypre_ParAMGDataCFMarkerArray(vdata) = NULL; hypre_ParAMGDataVtemp(vdata) = NULL; hypre_ParAMGDataAMat(vdata) = NULL; hypre_ParAMGDataBVec(vdata) = NULL; hypre_ParAMGDataZtemp(vdata) = NULL; hypre_ParAMGDataCommInfo(vdata) = NULL; hypre_ParAMGDataUArray(vdata) = NULL; hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL; hypre_ParAMGDataNumLevels(vdata) = 0; hypre_ParAMGDataMaxLevels(vdata) = 10; hypre_ParAMGDataNumFunctions(vdata) = 1; hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0; hypre_ParAMGDataRelaxOrder(vdata) = 1; hypre_ParAMGDataMaxCoarseSize(vdata) = 9; hypre_ParAMGDataMinCoarseSize(vdata) = 0; hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9; return (void *) vdata; } /* Destroy data for V-cycle F-relaxation */ HYPRE_Int hypre_MGRDestroyFrelaxVcycleData( void *data ) { hypre_ParAMGData * vdata = (hypre_ParAMGData*) data; HYPRE_Int i; HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata); MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST); for (i = 1; i < num_levels + 1; i++) { if (hypre_ParAMGDataAArray(vdata)[i]) { hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]); } if (hypre_ParAMGDataPArray(vdata)[i - 1]) { hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i - 1]); } hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[i - 1]); hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]); hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST); } if (num_levels < 1) { hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[0]); } /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */ //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata)); hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST); 
hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST); //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST); /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */ /* if (hypre_ParAMGDataZtemp(vdata)) hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata)); */ if (hypre_ParAMGDataAMat(vdata)) { hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); } if (hypre_ParAMGDataBVec(vdata)) { hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); } if (hypre_ParAMGDataCommInfo(vdata)) { hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); } if (new_comm != hypre_MPI_COMM_NULL) { hypre_MPI_Comm_free (&new_comm); } hypre_TFree(vdata, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Set C-point variables for each reduction level */ /* Currently not implemented */ HYPRE_Int hypre_MGRSetReductionLevelCpoints( void *mgr_vdata, HYPRE_Int nlevels, HYPRE_Int *num_coarse_points, HYPRE_Int **level_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_coarse_levels) = nlevels; (mgr_data -> num_coarse_per_level) = num_coarse_points; (mgr_data -> level_coarse_indexes) = level_coarse_indexes; return hypre_error_flag; } /* Initialize some data */ /* Set whether non-coarse points on each level should be explicitly tagged as F-points */ HYPRE_Int hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag; return hypre_error_flag; } /* Set whether the reserved C points are reduced before the coarse grid solve */ HYPRE_Int hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; 
(mgr_data -> lvl_to_keep_cpoints) = level; return hypre_error_flag; } /* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... */ HYPRE_Int hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_BigInt *begin_idx_array, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; if ((mgr_data -> idx_array) != NULL) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST); if (begin_idx_array != NULL) { for (i = 0; i < block_size; i++) { index_array[i] = *(begin_idx_array + i); } } hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes); (mgr_data -> idx_array) = index_array; (mgr_data -> set_c_points_method) = 1; return hypre_error_flag; } /* Initialize/ set local block data information */ HYPRE_Int hypre_MGRSetCpointsByBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { HYPRE_Int i, j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; /* free block cf_marker data if not previously destroyed */ if ((mgr_data -> block_cf_marker) != NULL) { for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if ((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = 
      hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* NOTE(review): memset fills every BYTE with FMRK; this yields the
       * intended per-int value only if FMRK is a byte-replicable value
       * (presumably -1 -> 0xFFFFFFFF) — confirm against the FMRK definition. */
      memset(block_cf_marker[i], FMRK, block_size * sizeof(HYPRE_Int));
   }
   /* mark the requested coarse positions of each level */
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < block_num_coarse_points[i]; j++)
      {
         (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
      }
   }

   /* store block_num_coarse_points */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = block_num_coarse_points[i];
      }
   }

   /* set block data (mgr_data takes ownership of both arrays) */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   /* method 0 = plain by-block C-point selection */
   (mgr_data -> set_c_points_method) = 0;
   return hypre_error_flag;
}

/* Set C-points via a user-supplied point marker array: unlike
 * hypre_MGRSetCpointsByBlock, block_cf_marker[lvl][j] here stores the
 * MARKER VALUE lvl_coarse_indexes[lvl][j] itself (not a CMRK flag at an
 * index position); points whose entry in point_marker_array matches one of
 * these values are treated as coarse.  mgr_data keeps a pointer to
 * point_marker_array (no copy). */
HYPRE_Int
hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata,
                                       HYPRE_Int block_size,
                                       HYPRE_Int max_num_levels,
                                       HYPRE_Int *lvl_num_coarse_points,
                                       HYPRE_Int **lvl_coarse_indexes,
                                       HYPRE_Int *point_marker_array)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i, j;
   HYPRE_Int **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;

   /* free block cf_marker data if not previously destroyed */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels,
                                   HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* byte-fill with FMRK (see note in hypre_MGRSetCpointsByBlock) */
      memset(block_cf_marker[i], FMRK, block_size * sizeof(HYPRE_Int));
   }
   /* store the marker VALUES themselves, packed at the front of each level's array */
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < lvl_num_coarse_points[i]; j++)
      {
         block_cf_marker[i][j] = lvl_coarse_indexes[i][j];
      }
   }

   /* store block_num_coarse_points */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = lvl_num_coarse_points[i];
      }
   }

   /* set block data; mgr_data aliases point_marker_array (no copy made) */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> point_marker_array) = point_marker_array;
   /* method 2 = marker-array C-point selection */
   (mgr_data -> set_c_points_method) = 2;
   return hypre_error_flag;
}

/*Set number of points that remain part of the coarse grid throughout the hierarchy */
/* Copies reserved_cpt_index (reserved_coarse_size global indices) into
 * mgr_data->reserved_coarse_indexes; frees any previous reservation.
 * Errors: warns and returns if mgr_vdata is NULL; flags arg 2 if the size
 * is negative. */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size,
                                HYPRE_BigInt *reserved_cpt_index)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_BigInt *reserved_coarse_indexes = NULL;
   HYPRE_Int i;

   if (!mgr_data)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! MGR object empty!\n");
      return hypre_error_flag;
   }
   if (reserved_coarse_size < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* free data not previously destroyed */
   if ((mgr_data -> reserved_coarse_indexes))
   {
      hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }

   /* set reserved coarse nodes (deep copy; caller keeps its array) */
   if (reserved_coarse_size > 0)
   {
      reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < reserved_coarse_size; i++)
      {
         reserved_coarse_indexes[i] = reserved_cpt_index[i];
      }
   }
   (mgr_data -> reserved_coarse_size) = reserved_coarse_size;
   (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes;
   return hypre_error_flag;
}

/* Set CF marker array */
/* Produces the CF splitting for one MGR level in *CF_marker_ptr.
 * If cflag != 0 (last level): everything becomes FMRK except the fixed
 * coarse set, which is CMRK.  Otherwise a BoomerAMG coarsening is run
 * first and then corrected so the fixed coarse set stays coarse. */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
                 hypre_ParCSRMatrix *A,
                 HYPRE_Int fixed_coarse_size,
                 HYPRE_Int *fixed_coarse_indexes,
                 HYPRE_Int debug_flag,
                 hypre_IntArray **CF_marker_ptr,
                 HYPRE_Int cflag)
{
   HYPRE_Int *CF_marker = NULL;
   HYPRE_Int *cindexes = fixed_coarse_indexes;
   HYPRE_Int i, row, nc;
   HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* If this is the last level, coarsen onto fixed coarse set */
   if (cflag)
   {
      if (*CF_marker_ptr != NULL)
      {
         hypre_IntArrayDestroy(*CF_marker_ptr);
      }
      *CF_marker_ptr = hypre_IntArrayCreate(nloc);
      hypre_IntArrayInitialize(*CF_marker_ptr);
      hypre_IntArraySetConstantValues(*CF_marker_ptr, FMRK);
      CF_marker = hypre_IntArrayData(*CF_marker_ptr);

      /* first mark fixed coarse set */
      nc = fixed_coarse_size;
      for (i = 0; i < nc; i++)
      {
         CF_marker[cindexes[i]] = CMRK;
      }
   }
   else
   {
      /* First coarsen to get initial CF splitting.
       * This is then followed by updating the CF marker to pass
       * coarse information to the next levels. NOTE: It may be
       * convenient to implement this way (allows the use of multiple
       * coarsening strategies without changing too much code),
       * but not necessarily the best option, compared to initializing
       * CF_marker first and then coarsening on subgraph which excludes
       * the initialized coarse nodes. */
      hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, CF_marker_ptr);
      CF_marker = hypre_IntArrayData(*CF_marker_ptr);

      /* Update CF_marker to correct Cpoints marked as Fpoints. */
      nc = fixed_coarse_size;
      for (i = 0; i < nc; i++)
      {
         CF_marker[cindexes[i]] = CMRK;
      }
      /* set F-points to FMRK. This is necessary since the different coarsening schemes
       * differentiate between type of F-points (example Ruge coarsening).
       * We do not need that distinction here. */
      for (row = 0; row < nloc; row++)
      {
         if (CF_marker[row] == CMRK) { continue; }
         CF_marker[row] = FMRK;
      }
#if 0
      /* Dead code kept for reference: would renumber the fixed coarse set into
       * next-level local indices (uses undeclared index_i/last_level/cf_marker,
       * so it cannot compile as-is). */
      /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
       * in the next level.
       */
      nc = 0;
      index_i = 0;
      for (row = 0; row < nloc; row++)
      {
         /* loop through new c-points */
         if (CF_marker[row] == CMRK) { nc++; }
         else if (CF_marker[row] == S_CMRK)
         {
            /* previously marked c-point is part of fixed coarse set. Track its current local index */
            cindexes[index_i++] = nc;
            /* reset c-point from S_CMRK to CMRK */
            cf_marker[row] = CMRK;
            nc++;
         }
         /* set F-points to FMRK. This is necessary since the different coarsening schemes
          * differentiate between type of F-points (example Ruge coarsening).
          * We do not need that distinction here.
          */
         else
         {
            CF_marker[row] = FMRK;
         }
      }
      /* check if this should be last level */
      if ( nc == fixed_coarse_size)
      {
         last_level = 1;
      }
      //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
   }
   return hypre_error_flag;
}

/* Expand the F-point-only matrix W = -D_FF^{-1} A_FC (nfpoints rows) to the
 * full interpolation operator P (P_nr_of_rows rows): C-point rows become
 * identity rows, F-point rows copy the corresponding W row.  Fills the
 * pre-allocated P_diag_i/j/data and P_offd_i row-pointer arrays; the offd
 * column/data arrays are reused from W by the caller.  Returns 0. */
HYPRE_Int
hypre_ExtendWtoPHost(HYPRE_Int P_nr_of_rows,
                     HYPRE_Int *CF_marker,
                     HYPRE_Int *W_diag_i,
                     HYPRE_Int *W_diag_j,
                     HYPRE_Complex *W_diag_data,
                     HYPRE_Int *P_diag_i,
                     HYPRE_Int *P_diag_j,
                     HYPRE_Complex *P_diag_data,
                     HYPRE_Int *W_offd_i,
                     HYPRE_Int *P_offd_i )
{
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, P_nr_of_rows, HYPRE_MEMORY_HOST);
   for (i = 0; i < P_nr_of_rows; i++)
   {
      fine_to_coarse[i] = -1;
   }

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < P_nr_of_rows; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       * NOTE(review): this pass tests CF_marker[i] > 0 while the fill pass
       * below tests >= 0; a marker value of exactly 0 would be filled as a
       * C-point with fine_to_coarse[i] == -1. Harmless if markers are only
       * +/-1 — confirm against the callers.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] > 0)
      {
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
   }

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   row_counter = 0;
   for (i = 0; i < P_nr_of_rows; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P: copy row row_counter of W */
         P_diag_i[i] = jj_counter;
         for (jj = W_diag_i[row_counter]; jj < W_diag_i[row_counter + 1]; jj++)
         {
            //P_marker[row_counter] = jj_counter;
            P_diag_j[jj_counter] = W_diag_j[jj];
            P_diag_data[jj_counter] = W_diag_data[jj];
            jj_counter++;
         }
         /* Off-Diagonal part of P: only row pointers here; the caller reuses
          * W's offd j/data arrays directly */
         P_offd_i[i] = jj_counter_offd;
         jj_counter_offd += W_offd_i[row_counter + 1] - W_offd_i[row_counter];
         row_counter++;
      }
      /* update off-diagonal row pointer */
      P_offd_i[i + 1] = jj_counter_offd;
   }
   P_diag_i[P_nr_of_rows] = jj_counter;
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   return 0;
}

/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
/* Builds P on the host.  method == 0: injection (W empty);
 * method == 1: L1-Jacobi-type W = -D^{-1} A_FC with D an L1 diagonal;
 * method == 2: classic Jacobi W = -diag(A_FF)^{-1} A_FC.
 * Result is returned through *P_ptr. */
HYPRE_Int
hypre_MGRBuildPHost( hypre_ParCSRMatrix *A,
                     HYPRE_Int *CF_marker,
                     HYPRE_BigInt *num_cpts_global,
                     HYPRE_Int method,
                     hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int A_nr_of_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_ParCSRMatrix *A_FF = NULL, *A_FC = NULL, *P = NULL;
   hypre_CSRMatrix *W_diag = NULL, *W_offd = NULL;
   HYPRE_Int P_diag_nnz, nfpoints;
   HYPRE_Int *P_diag_i = NULL, *P_diag_j = NULL, *P_offd_i = NULL;
   HYPRE_Complex *P_diag_data = NULL, *diag = NULL, *diag1 = NULL;
   HYPRE_BigInt nC_global;
   HYPRE_Int i;
   HYPRE_MemoryLocation memory_location_P =
      hypre_ParCSRMatrixMemoryLocation(A);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* count local F-points (marker convention: -1 = F) */
   nfpoints = 0;
   for (i = 0; i < A_nr_of_rows; i++)
   {
      if (CF_marker[i] == -1)
      {
         nfpoints++;
      }
   }

   if (method > 0)
   {
      /* split A into A_FF / A_FC submatrices */
      hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, NULL, &A_FC, &A_FF);
      diag = hypre_CTAlloc(HYPRE_Complex, nfpoints, memory_location_P);
      if (method == 1)
      {
         // extract diag inverse sqrt
         // hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 3);

         // L1-Jacobi-type interpolation
         /* diag1 accumulates abs row sums over all four blocks of row i */
         HYPRE_Complex scal = 1.0;
         diag1 = hypre_CTAlloc(HYPRE_Complex, nfpoints, memory_location_P);
         hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 0);
         hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixDiag(A_FF), NULL, NULL, diag1, 1, 1.0, "set");
         hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixDiag(A_FC), NULL, NULL, diag1, 1, 1.0, "add");
         hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixOffd(A_FF), NULL, NULL, diag1, 1, 1.0, "add");
         hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixOffd(A_FC), NULL, NULL, diag1, 1, 1.0, "add");
         for (i = 0; i < nfpoints; i++)
         {
            /* L1 diagonal: a_ii plus the off-diagonal L1 mass of the row */
            HYPRE_Complex dsum = diag[i] + scal * (diag1[i] - hypre_cabs(diag[i]));
            diag[i] = 1.
                      / dsum;
         }
         hypre_TFree(diag1, memory_location_P);
      }
      else if (method == 2)
      {
         // extract diag inverse
         hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 2);
      }
      /* negate so that scaling A_FC below directly produces W = -D^{-1} A_FC */
      for (i = 0; i < nfpoints; i++)
      {
         diag[i] = -diag[i];
      }

      /* wrap diag in a vector (ownership of diag passes to D_FF_inv) and
       * scale A_FC's rows in place; A_FC then IS the W matrix */
      hypre_Vector *D_FF_inv = hypre_SeqVectorCreate(nfpoints);
      hypre_VectorData(D_FF_inv) = diag;
      hypre_SeqVectorInitialize_v2(D_FF_inv, memory_location_P);
      hypre_CSRMatrixDiagScale(hypre_ParCSRMatrixDiag(A_FC), D_FF_inv, NULL);
      hypre_CSRMatrixDiagScale(hypre_ParCSRMatrixOffd(A_FC), D_FF_inv, NULL);
      hypre_SeqVectorDestroy(D_FF_inv);
      W_diag = hypre_ParCSRMatrixDiag(A_FC);
      W_offd = hypre_ParCSRMatrixOffd(A_FC);
      nC_global = hypre_ParCSRMatrixGlobalNumCols(A_FC);
   }
   else
   {
      /* injection: W has no entries at all */
      W_diag = hypre_CSRMatrixCreate(nfpoints, A_nr_of_rows - nfpoints, 0);
      W_offd = hypre_CSRMatrixCreate(nfpoints, 0, 0);
      hypre_CSRMatrixInitialize_v2(W_diag, 0, memory_location_P);
      hypre_CSRMatrixInitialize_v2(W_offd, 0, memory_location_P);
      /* last rank holds the global C-point count; broadcast it */
      if (my_id == (num_procs - 1))
      {
         nC_global = num_cpts_global[1];
      }
      hypre_MPI_Bcast(&nC_global, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   }

   /* Construct P from matrix product W_diag */
   /* nnz = W's entries plus one identity entry per C-point */
   P_diag_nnz = hypre_CSRMatrixNumNonzeros(W_diag) + hypre_CSRMatrixNumCols(W_diag);
   P_diag_i = hypre_CTAlloc(HYPRE_Int, A_nr_of_rows + 1, memory_location_P);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_nnz, memory_location_P);
   P_diag_data = hypre_CTAlloc(HYPRE_Complex, P_diag_nnz, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, A_nr_of_rows + 1, memory_location_P);

   /* Extend W data to P data */
   hypre_ExtendWtoPHost( A_nr_of_rows, CF_marker,
                         hypre_CSRMatrixI(W_diag),
                         hypre_CSRMatrixJ(W_diag),
                         hypre_CSRMatrixData(W_diag),
                         P_diag_i, P_diag_j, P_diag_data,
                         hypre_CSRMatrixI(W_offd),
                         P_offd_i );

   // finalize P
   P = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                nC_global,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                hypre_CSRMatrixNumCols(W_offd),
                                P_diag_nnz,
                                hypre_CSRMatrixNumNonzeros(W_offd) );
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(P)) = memory_location_P; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(P)) = memory_location_P; hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(P)) = P_diag_i; hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(P)) = P_diag_j; hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(P)) = P_diag_data; hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(P)) = P_offd_i; hypre_CSRMatrixJ(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixJ(W_offd); hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixData(W_offd); hypre_CSRMatrixJ(W_offd) = NULL; hypre_CSRMatrixData(W_offd) = NULL; if (method > 0) { hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(A_FC); hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(A_FC); hypre_ParCSRMatrixColMapOffd(A_FC) = NULL; hypre_ParCSRMatrixColMapOffd(A_FC) = NULL; hypre_ParCSRMatrixNumNonzeros(P) = hypre_ParCSRMatrixNumNonzeros( A_FC) + hypre_ParCSRMatrixGlobalNumCols(A_FC); } else { hypre_ParCSRMatrixNumNonzeros(P) = nC_global; } hypre_ParCSRMatrixDNumNonzeros(P) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(P); hypre_MatvecCommPkgCreate(P); *P_ptr = P; if (A_FF) { hypre_ParCSRMatrixDestroy(A_FF); } if (A_FC) { hypre_ParCSRMatrixDestroy(A_FC); } if (method <= 0) { hypre_CSRMatrixDestroy(W_diag); hypre_CSRMatrixDestroy(W_offd); } return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real 
              *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real *a_diag;          /* 1/a_ii for F-rows, used by method 2 */

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int *P_marker, *P_marker_offd;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;  /* per-thread nnz counters */
   // HYPRE_Int jj_begin_row,jj_begin_row_offd;
   // HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;
   HYPRE_Real wall_time;  /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //num_threads = hypre_NumThreads();
   // Temporary fix, disable threading
   // TODO: enable threading
   num_threads = 1;
   //my_first_cpt = num_cpts_global[0];
   /* last rank owns the global C-point count; broadcast it to all */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }
   if
      (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds();
   }
   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }
   if (!comm_pkg)
   {
      /* communication package is built lazily */
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   /* pack the locally-owned CF markers this rank must send */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
      {
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++)
   {
      fine_to_coarse[i] = -1;
   }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* partition the rows [ns, ne) across num_threads; the first `rest`
       * threads get one extra row */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         else
         {
            /* count one entry per C-neighbor; method 0 (injection) keeps
             * F-rows empty, hence the (method > 0) guard */
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if ((CF_marker[i1] >= 0) && (method > 0))
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if ((CF_marker_offd[i1] >= 0) && (method > 0))
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* prefix-sum the per-thread counters to get global offsets/totals */
   for (i = 0; i < num_threads - 1; i++)
   {
      coarse_counter[i + 1] += coarse_counter[i];
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds();
   }
   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* shift each thread's local coarse numbering by the preceding threads' totals */
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0)
      {
         coarse_shift = coarse_counter[j - 1];
      }
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }
   /* NOTE: the exchange of fine_to_coarse to off-processor columns is
    * disabled; the offd second pass stores local indices (i1) instead.
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt;
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   */
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds();
   }
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   /* cache 1/a_ii for every F-row (used by method 2 weights) */
   a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] < 0)
      {
         for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
         {
            i1 = A_diag_j[jj];
            if ( i == i1 ) /* diagonal of A only */
            {
               a_diag[i] = 1.0 / A_diag_data[jj];
            }
         }
      }
   }
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      /* same row partition as the first pass */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      /* start writing at the offsets accumulated by preceding threads */
      jj_counter = 0;
      if (jl > 0)
      {
         jj_counter = jj_count[jl - 1];
      }
      jj_counter_offd = 0;
      if (jl > 0)
      {
         jj_counter_offd = jj_count_offd[jl - 1];
      }
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      }
      else
      {
         P_marker_offd = NULL;
      }
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if ((CF_marker[i1] >= 0) && (method > 0))
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  /*
                  if(method == 0)
                  {
                     P_diag_data[jj_counter] = 0.0;
                  }
                  */
                  /* method 1: w = -a_ij;  method 2: w = -a_ij / a_ii */
                  if (method == 1)
                  {
                     P_diag_data[jj_counter] = - A_diag_data[jj];
                  }
                  else if (method == 2)
                  {
                     P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i];
                  }
                  jj_counter++;
               }
            }

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if ((CF_marker_offd[i1] >= 0) && (method > 0))
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     /* store the LOCAL offd column; compressed to P's offd
                      * numbering after assembly via tmp_map_offd */
                     P_offd_j[jj_counter_offd] = i1;
                     /*
                     if(method == 0)
                     {
                        P_offd_data[jj_counter_offd] = 0.0;
                     }
                     */
                     if (method == 1)
                     {
                        P_offd_data[jj_counter_offd] = - A_offd_data[jj];
                     }
                     else if (method == 2)
                     {
                        P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i];
                     }
                     jj_counter_offd++;
                  }
               }
            }
         }
         P_offd_i[i + 1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(a_diag, HYPRE_MEMORY_HOST);

   /* assemble P and hand over the CSR arrays (P takes ownership) */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* compress P's offd columns to the ones actually used */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker[i] = 0;
      }
      num_cols_P_offd = 0;
      for (i = 0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      /* tmp_map_offd[k] = k-th used A-offd column, in increasing order */
      index = 0;
      for (i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) { index++; }
         tmp_map_offd[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* remap stored local columns to compressed P-offd indices */
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] =
            hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* restore special F-markers (-3) to plain F-points (-1) */
   for (i = 0; i < n_fine; i++)
      if (CF_marker[i] == -3) { CF_marker[i] = -1; }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   /* builds P's comm pkg and fills col_map_offd_P from tmp_map_offd */
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return (0);
}

/* Interpolation for MGR - Dynamic Row Sum method */
/* Same two-pass structure as hypre_MGRBuildP, but F-row entries are counted
 * for every C-neighbor (no method guard); the DRS weighting itself follows
 * in the part of the function beyond this chunk. */
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A,
                    HYPRE_Int *CF_marker,
                    HYPRE_BigInt *num_cpts_global,
                    HYPRE_Int blk_size,
                    HYPRE_Int reserved_coarse_size,
                    HYPRE_Int debug_flag,
                    hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real *a_diag;

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd;
   HYPRE_Int *CF_marker_offd = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size,
             P_offd_size;
   HYPRE_Int *P_marker, *P_marker_offd;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;  /* per-thread nnz counters */
   // HYPRE_Int jj_begin_row,jj_begin_row_offd;
   // HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;
   HYPRE_Real wall_time;  /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //num_threads = hypre_NumThreads();
   // Temporary fix, disable threading
   // TODO: enable threading
   num_threads = 1;
   //my_first_cpt = num_cpts_global[0];
   /* last rank owns the global C-point count; broadcast it to all */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds();
   }
   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }
   if (!comm_pkg)
   {
      /* communication package is built lazily */
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);
   /* pack the locally-owned CF markers this rank must send */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j <
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if ( i == i1 ) /* diagonal of A only */ { a_diag[i] = 1.0 / A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } else { P_marker_offd = NULL; } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, 
HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < num_cols_A_offd; i++) { P_marker[i] = 0; } num_cols_P_offd = 0; for (i = 0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) { index++; } tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) if (CF_marker[i] == -3) { CF_marker[i] = -1; } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return (0); } /* Scale ParCSR matrix A = scalar * A * A: the target CSR matrix * vector: array of real numbers */ HYPRE_Int hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A) { HYPRE_Int i, j, n_local; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = 
hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); n_local = hypre_CSRMatrixNumRows(A_diag); for (i = 0; i < n_local; i++) { HYPRE_Real factor = vector[i]; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { A_diag_data[j] *= factor; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { A_offd_data[j] *= factor; } } return (0); } /************************************************************ * Available methods: * 0: inv(A_FF) approximated by its diagonal inverse * 1: inv(A_FF) approximated by sparse approximate inverse *************************************************************/ HYPRE_Int hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, hypre_ParCSRMatrix *RT, HYPRE_Int bsize, HYPRE_Int ordering, HYPRE_Int method, HYPRE_Int Pmax, HYPRE_Int keep_stencil, HYPRE_Int *CF_marker, hypre_ParCSRMatrix **A_h_ptr) { HYPRE_Int *c_marker, *f_marker; HYPRE_Int n_local_fine_grid, i, i1, jj; hypre_ParCSRMatrix *A_cc; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_cf; hypre_ParCSRMatrix *A_h; hypre_ParCSRMatrix *A_h_correction; HYPRE_Int max_elmts = Pmax; // HYPRE_Real wall_time = 0.; hypre_ParCSRMatrix *P_mod = NULL; HYPRE_Int my_id; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Comm_rank(comm, &my_id); HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fine_grid; i++) { HYPRE_Int point_type = CF_marker[i]; hypre_assert(point_type == 1 || point_type == -1); c_marker[i] = point_type; f_marker[i] = -point_type; } // get the A_cc sub-block hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc); if (method == 0) { if (keep_stencil) { //wall_time = time_getWallclockSeconds(); hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); 
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); // extract the diagonal of A_ff and compute D_ff_inv hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff); HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag); HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag); HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag); HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag); HYPRE_Real *D_ff_inv; D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i + 1]; jj++) { i1 = A_ff_diag_j[jj]; if ( i == i1 ) { D_ff_inv[i] = -1.0 / A_ff_diag_data[jj]; } } } // extract the diagonal of A_cf hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf); HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag); HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag); HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag); n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag); HYPRE_Real *D_cf; D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { i1 = A_cf_diag_j[A_cf_diag_i[i]]; D_cf[i] = A_cf_diag_data[jj]; } // compute the triple product hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc); hypre_ParCSRMatrixLeftScale(D_cf, A_fc); A_h_correction = A_fc; hypre_TFree(D_cf, HYPRE_MEMORY_HOST); hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_cf); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time); } else { //wall_time = time_getWallclockSeconds(); P_mod = hypre_ParCSRMatrixCompleteClone(P); hypre_ParCSRMatrixCopy(P, P_mod, 1); HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod); hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod); HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag); HYPRE_Real 
*P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag); for (i = 0; i < n_local_rows; i ++) { if (CF_marker[i] >= 0) { HYPRE_Int ii = P_mod_diag_i[i]; P_mod_diag_data[ii] = 0.0; } } hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product time new: %1.5f\n", wall_time); hypre_ParCSRMatrixDestroy(P_mod); } } else { // Approximate inverse for ideal interploation hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); hypre_ParCSRMatrix *A_ff_inv = NULL; hypre_ParCSRMatrix *minus_Wp = NULL; hypre_MGRApproximateInverse(A_ff, &A_ff_inv); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); A_h_correction = hypre_ParMatmul(A_cf, minus_Wp); hypre_ParCSRMatrixDestroy(minus_Wp); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_cf); } // perform dropping for A_h_correction // specific to multiphase poromechanics // we only keep the diagonal of each block //wall_time = time_getWallclockSeconds(); HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction)); hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction); HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag); HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag); hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction); HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd); if (Pmax > 0) { if (ordering == 0) // interleaved ordering { HYPRE_Int 
*A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1, memory_location); HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts) * n_local_cpoints, memory_location); HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts) * n_local_cpoints, memory_location); HYPRE_Int num_nonzeros_diag_new = 0; HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1, memory_location); HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts * n_local_cpoints, memory_location); HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts * n_local_cpoints, memory_location); HYPRE_Int num_nonzeros_offd_new = 0; for (i = 0; i < n_local_cpoints; i++) { HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i + 1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i + 1] - A_h_correction_offd_i[i]; HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Int row_start = i - (i % bsize); HYPRE_Int row_stop = row_start + bsize - 1; HYPRE_Int cnt = 0; for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i + 1]; jj++) { aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag; aux_data[cnt] = A_h_correction_offd_data[jj]; cnt++; } for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++) { aux_j[cnt] = A_h_correction_diag_j[jj]; aux_data[cnt] = A_h_correction_diag_data[jj]; cnt++; } hypre_qsort2_abs(aux_j, aux_data, 0, cnt - 1); for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++) { i1 = A_h_correction_diag_j[jj]; if (i1 >= row_start && i1 <= row_stop) { // copy data to new arrays A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1; A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj]; ++num_nonzeros_diag_new; } else { // Do nothing } } if 
(max_elmts > 0) { for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++) { HYPRE_Int col_idx = aux_j[jj]; HYPRE_Real col_value = aux_data[jj]; if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop)) { A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx; A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value; ++num_nonzeros_diag_new; } else if (col_idx >= ncol_diag) { A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag; A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value; ++num_nonzeros_offd_new; } } } A_h_correction_diag_i_new[i + 1] = num_nonzeros_diag_new; A_h_correction_offd_i_new[i + 1] = num_nonzeros_offd_new; hypre_TFree(aux_j, HYPRE_MEMORY_HOST); hypre_TFree(aux_data, HYPRE_MEMORY_HOST); } hypre_TFree(A_h_correction_diag_i, memory_location); hypre_TFree(A_h_correction_diag_j, memory_location); hypre_TFree(A_h_correction_diag_data, memory_location); hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new; hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new; hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new; if (A_h_correction_offd_i) { hypre_TFree(A_h_correction_offd_i, memory_location); } if (A_h_correction_offd_j) { hypre_TFree(A_h_correction_offd_j, memory_location); } if (A_h_correction_offd_data) { hypre_TFree(A_h_correction_offd_data, memory_location); } hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new; hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new; hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new; } else { hypre_printf("Error!! 
Block ordering for non-Galerkin coarse grid is not currently supported\n"); exit(-1); } } //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time); //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered"); // coarse grid / schur complement hypre_ParCSRMatrixAdd(1.0, A_cc, 1.0, A_h_correction, &A_h); *A_h_ptr = A_h; //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h"); hypre_ParCSRMatrixDestroy(A_cc); hypre_ParCSRMatrixDestroy(A_h_correction); hypre_TFree(c_marker, HYPRE_MEMORY_HOST); hypre_TFree(f_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A, HYPRE_BigInt *mgr_idx_array, HYPRE_Solver A_ff_solver) { HYPRE_Int *U_marker, *S_marker, *P_marker; HYPRE_Int n_fine, i; HYPRE_BigInt ibegin; hypre_ParCSRMatrix *A_up; hypre_ParCSRMatrix *A_uu; hypre_ParCSRMatrix *A_su; hypre_ParCSRMatrix *A_pu; hypre_ParVector *e1_vector; hypre_ParVector *e2_vector; hypre_ParVector *e3_vector; hypre_ParVector *e4_vector; hypre_ParVector *e5_vector; n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); ibegin = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(ibegin == mgr_idx_array[0]); U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { U_marker[i] = -1; S_marker[i] = -1; P_marker[i] = -1; } // create C and F markers for (i = 0; i < n_fine; i++) { if (i < mgr_idx_array[1] - ibegin) { U_marker[i] = 1; } else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin)) { S_marker[i] = 1; } else { P_marker[i] = 1; } } // Get A_up hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up); // GetA_uu hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu); // Get A_su hypre_MGRGetSubBlock(A, S_marker, U_marker, 
0, &A_su); // Get A_pu hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu); e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up), hypre_ParCSRMatrixGlobalNumCols(A_up), hypre_ParCSRMatrixColStarts(A_up)); hypre_ParVectorInitialize(e1_vector); hypre_ParVectorSetConstantValues(e1_vector, 1.0); e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e2_vector); hypre_ParVectorSetConstantValues(e2_vector, 0.0); e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e3_vector); hypre_ParVectorSetConstantValues(e3_vector, 0.0); e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su), hypre_ParCSRMatrixGlobalNumRows(A_su), hypre_ParCSRMatrixRowStarts(A_su)); hypre_ParVectorInitialize(e4_vector); hypre_ParVectorSetConstantValues(e4_vector, 0.0); e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu), hypre_ParCSRMatrixGlobalNumRows(A_pu), hypre_ParCSRMatrixRowStarts(A_pu)); hypre_ParVectorInitialize(e5_vector); hypre_ParVectorSetConstantValues(e5_vector, 0.0); // compute e2 = A_up * e1 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector); // solve e3 = A_uu^-1 * e2 hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // print e4 hypre_ParVectorPrintIJ(e4_vector, 1, "Dsp"); // compute e5 = A_pu * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector); hypre_ParVectorPrintIJ(e5_vector, 1, "Dpp"); hypre_ParVectorDestroy(e1_vector); hypre_ParVectorDestroy(e2_vector); hypre_ParVectorDestroy(e3_vector); hypre_ParCSRMatrixDestroy(A_uu); 
hypre_ParCSRMatrixDestroy(A_up); hypre_ParCSRMatrixDestroy(A_pu); hypre_ParCSRMatrixDestroy(A_su); hypre_TFree(U_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv) { HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version; HYPRE_Real mr_tol, nsh_tol; HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *approx_A_inv = NULL; print_level = 0; nsh_max_iter = 2; nsh_max_row_nnz = 2; // default 1000 mr_max_iter = 1; mr_tol = 1.0e-3; mr_max_row_nnz = 2; // default 800 mr_col_version = 0; nsh_tol = 1.0e-3; droptol[0] = 1.0e-2; droptol[1] = 1.0e-2; hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL, mr_max_row_nnz, nsh_max_row_nnz, mr_max_iter, nsh_max_iter, mr_col_version, print_level); *A_inv = approx_A_inv; if (droptol) { hypre_TFree(droptol, HYPRE_MEMORY_HOST); } return hypre_error_flag; } HYPRE_Int hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // 
HYPRE_BigInt my_first_cpt; HYPRE_Int i, jj; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; // HYPRE_Int num_threads; // HYPRE_Real wall_time; /* for debugging instrumentation */ C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1) ? 1 : -1; F_marker[i] = (CF_marker[i] == 1) ? -1 : 1; } // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); // compute -Wp minus_Wp = hypre_ParMatmul(S, A_fc); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); // num_threads = hypre_NumThreads(); // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++) { jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++) { jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P: copy the row of minus_Wp, negating the data
          * (minus_Wp stores -W, P stores W = -A_{ff}^{-1}A_{fc}). */
         P_diag_i[i] = jj_counter;
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
            P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
            jj_counter++;
         }

         /* Off-Diagonal part of P */
         P_offd_i[i] = jj_counter_offd;
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
               P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
      /* offd row pointer is closed for every row (C- and F-points alike) */
      P_offd_i[i + 1] = jj_counter_offd;
   }

   /* Wrap the filled CSR arrays into the parallel matrix P. */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* P inherits minus_Wp's off-processor column map. */
   num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
   if (P_offd_size)
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(minus_Wp);

   return 0;
}

/* Build the MGR interpolation P = [W; I] with W = -A_ff^{-1} A_fc, where
 * A_ff^{-1} is an approximate inverse (hypre_MGRApproximateInverse).
 * CF_marker: +1 marks C-points, everything else is treated as F.
 * num_cpts_global: per-rank coarse partitioning; entry [1] on the last rank
 * holds the global number of C-points and is broadcast to all ranks.
 * On success *P_ptr owns the new matrix; returns 0. */
HYPRE_Int hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int
debug_flag, hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_ff_inv;
   hypre_ParCSRMatrix *W;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Int *P_offd_i;
   HYPRE_Int P_diag_nnz;
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i;
   HYPRE_Real m_one = -1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

   // create C and F markers (complementary +1/-1 selectors for submatrix extraction)
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }

   // Get A_FF
   hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff);
   // hypre_ParCSRMatrixPrintIJ(A_ff, 1, 1, "A_ff");
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);

   hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
   // hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv");
   // hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc");

   /* W = -(approx A_ff^{-1}) * A_fc */
   W = hypre_ParMatmul(A_ff_inv, A_fc);
   hypre_ParCSRMatrixScale(W, m_one);
   // hypre_ParCSRMatrixPrintIJ(W, 1, 1, "Wp");

   hypre_CSRMatrix *W_diag = hypre_ParCSRMatrixDiag(W);
   hypre_CSRMatrix *W_offd = hypre_ParCSRMatrixOffd(W);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* last rank knows the global C-point count; broadcast it */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* P_diag holds every W_diag entry plus one identity entry per C-point
    * (num cols of W_diag == number of local C-points). */
   P_diag_nnz = hypre_CSRMatrixNumNonzeros(W_diag) + hypre_CSRMatrixNumCols(W_diag);
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_nnz, memory_location_P);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_nnz, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);

   /* Extend W data to P data */
   hypre_ExtendWtoPHost( n_fine, CF_marker,
                         hypre_CSRMatrixI(W_diag), hypre_CSRMatrixJ(W_diag), hypre_CSRMatrixData(W_diag),
                         P_diag_i, P_diag_j, P_diag_data,
                         hypre_CSRMatrixI(W_offd), P_offd_i );

   // final P
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                hypre_CSRMatrixNumCols(W_offd),
                                P_diag_nnz,
                                hypre_CSRMatrixNumNonzeros(W_offd) );
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(P)) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(P)) = memory_location_P;
   hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(P)) = P_diag_i;
   hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(P)) = P_diag_j;
   hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(P)) = P_diag_data;
   hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(P)) = P_offd_i;
   /* steal W_offd's j/data arrays into P (ownership transfer: the NULLing
    * below keeps hypre_ParCSRMatrixDestroy(W) from freeing them) */
   hypre_CSRMatrixJ(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixJ(W_offd);
   hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixData(W_offd);
   hypre_CSRMatrixJ(W_offd) = NULL;
   hypre_CSRMatrixData(W_offd) = NULL;

   num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(W);
   if (hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(P)))
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(P)) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_ff);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(A_ff_inv);
   hypre_ParCSRMatrixDestroy(W);

   return 0;
}

/* Setup interpolation operator.
 * Dispatches on interp_type: <3 -> MGR P builders (host/device), 4 ->
 * approximate-inverse P (host only), 5/6/7 -> BoomerAMG mod-ext variants,
 * otherwise classical modified interpolation. Result returned via *P. */
HYPRE_Int hypre_MGRBuildInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P, HYPRE_Int interp_type, HYPRE_Int numsweeps)
{
   // HYPRE_Int i;
   hypre_ParCSRMatrix *P_ptr = NULL;
   //HYPRE_Real jac_trunc_threshold = trunc_factor;
   //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   /* Interpolation for each level */
   if (interp_type < 3)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         // hypre_MGRBuildP(A, CF_marker, num_cpts_global, interp_type, debug_flag, &P_ptr);
         hypre_MGRBuildPHost(A, CF_marker, num_cpts_global, interp_type, &P_ptr);
         //hypre_ParCSRMatrixPrintIJ(P_ptr, 0, 0, "P_host");
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_MGRBuildPDevice(A, CF_marker, num_cpts_global, interp_type, &P_ptr);
         //hypre_ParCSRMatrixPrintIJ(P_ptr, 0, 0, "P_device");
      }
#endif
      /* Could do a few sweeps of Jacobi to further improve Jacobi interpolation P */
      /* if(interp_type == 2) { for(i=0; i<numsweeps; i++) { hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); } hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } */
   }
   else if (interp_type == 4)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr);
         hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_NoGPUSupport("interpolation");
      }
#endif
   }
   /* else if (interp_type
== 99) { if (exec == HYPRE_EXEC_HOST) { hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } #if defined(HYPRE_USING_CUDA) else { hypre_NoGPUSupport("interpolation"); } #endif } */
   else if (interp_type == 5)
   {
      hypre_BoomerAMGBuildModExtInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr);
   }
   else if (interp_type == 6)
   {
      hypre_BoomerAMGBuildModExtPIInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr);
   }
   else if (interp_type == 7)
   {
      hypre_BoomerAMGBuildModExtPEInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr);
   }
   else
   {
      /* Classical modified interpolation */
      hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr);
   }

   /* set pointer to P */
   *P = P_ptr;

   return hypre_error_flag;
}

/* Setup restriction operator.
 * restrict_type 0: injection-style R from A; 1/2: same builders applied to
 * A^T; 3: approximate-inverse interpolation on A^T (with truncation);
 * otherwise classical modified interpolation on A^T with a freshly built
 * strength matrix ST. Result returned via *R. */
HYPRE_Int hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, hypre_ParCSRMatrix **R, HYPRE_Int restrict_type, HYPRE_Int numsweeps)
{
   // HYPRE_Int i;
   hypre_ParCSRMatrix *R_ptr = NULL;
   hypre_ParCSRMatrix *AT = NULL;
   hypre_ParCSRMatrix *ST = NULL;
   // HYPRE_Real jac_trunc_threshold = trunc_factor;
   // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   /* Build AT (transpose A) */
   if (restrict_type > 0)
   {
      hypre_ParCSRMatrixTranspose(A, &AT, 1);
   }

   /* Restriction for each level */
   if (restrict_type == 0)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
         //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_host");
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_MGRBuildPDevice(A, CF_marker, num_cpts_global, restrict_type, &R_ptr);
         //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_device");
      }
#endif
   }
   else if (restrict_type == 1 || restrict_type == 2)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
         //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_host");
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_MGRBuildPDevice(AT, CF_marker, num_cpts_global, restrict_type, &R_ptr);
         //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_device");
      }
#endif
   }
   else if (restrict_type == 3)
   {
      /* move diagonal to first entry */
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(AT));
      hypre_MGRBuildInterpApproximateInverse(AT, CF_marker, num_cpts_global, debug_flag, &R_ptr);
      hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts);
   }
   else
   {
      /* Build new strength matrix */
      hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST);
      /* Classical modified interpolation */
      hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &R_ptr);
   }

   /* set pointer to P */
   *R = R_ptr;

   /* Free memory */
   if (restrict_type > 0)
   {
      hypre_ParCSRMatrixDestroy(AT);
   }
   /* NOTE(review): ST is created whenever the final else runs (any
    * restrict_type other than 0,1,2,3) but is only destroyed when
    * restrict_type > 5 — ST appears to leak for restrict_type 4 and 5;
    * confirm whether the guard should be > 3. */
   if (restrict_type > 5)
   {
      hypre_ParCSRMatrixDestroy(ST);
   }

   return hypre_error_flag;
}

/* In-place inverse of a dense row-major 4x4 matrix a[16] via the adjugate
 * (cofactor) formula. No singularity check is performed (see the
 * commented-out warning below): a (near-)singular input yields Inf/NaN. */
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
   const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3];
   const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7];
   const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11];
   const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];

   /* M_ij are the cofactors (already transposed so a[k] = adj(A)/det). */
   const HYPRE_Real M11 = a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a22 * a34 * a43 - a23 * a32 * a44 - a24 * a33 * a42;
   const HYPRE_Real M12 = a12 * a34 * a43 + a13 * a32 * a44 + a14 * a33 * a42 - a12 * a33 * a44 - a13 * a34 * a42 - a14 * a32 * a43;
   const HYPRE_Real M13 = a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a12 * a24 *
a43 - a13 * a22 * a44 - a14 * a23 * a42;
   const HYPRE_Real M14 = a12 * a24 * a33 + a13 * a22 * a34 + a14 * a23 * a32 - a12 * a23 * a34 - a13 * a24 * a32 - a14 * a22 * a33;
   const HYPRE_Real M21 = a21 * a34 * a43 + a23 * a31 * a44 + a24 * a33 * a41 - a21 * a33 * a44 - a23 * a34 * a41 - a24 * a31 * a43;
   const HYPRE_Real M22 = a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 - a11 * a34 * a43 - a13 * a31 * a44 - a14 * a33 * a41;
   const HYPRE_Real M23 = a11 * a24 * a43 + a13 * a21 * a44 + a14 * a23 * a41 - a11 * a23 * a44 - a13 * a24 * a41 - a14 * a21 * a43;
   const HYPRE_Real M24 = a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 - a11 * a24 * a33 - a13 * a21 * a34 - a14 * a23 * a31;
   const HYPRE_Real M31 = a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 - a21 * a34 * a42 - a22 * a31 * a44 - a24 * a32 * a41;
   const HYPRE_Real M32 = a11 * a34 * a42 + a12 * a31 * a44 + a14 * a32 * a41 - a11 * a32 * a44 - a12 * a34 * a41 - a14 * a31 * a42;
   const HYPRE_Real M33 = a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 - a11 * a24 * a42 - a12 * a21 * a44 - a14 * a22 * a41;
   const HYPRE_Real M34 = a11 * a24 * a32 + a12 * a21 * a34 + a14 * a22 * a31 - a11 * a22 * a34 - a12 * a24 * a31 - a14 * a21 * a32;
   const HYPRE_Real M41 = a21 * a33 * a42 + a22 * a31 * a43 + a23 * a32 * a41 - a21 * a32 * a43 - a22 * a33 * a41 - a23 * a31 * a42;
   const HYPRE_Real M42 = a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 - a11 * a33 * a42 - a12 * a31 * a43 - a13 * a32 * a41;
   const HYPRE_Real M43 = a11 * a23 * a42 + a12 * a21 * a43 + a13 * a22 * a41 - a11 * a22 * a43 - a12 * a23 * a41 - a13 * a21 * a42;
   const HYPRE_Real M44 = a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 - a11 * a23 * a32 - a12 * a21 * a33 - a13 * a22 * a31;

   /* Laplace expansion along the first row */
   const HYPRE_Real det = a11 * M11 + a12 * M21 + a13 * M31 + a14 * M41;
   HYPRE_Real det_inv;

   //if ( fabs(det) < 1e-22 ) {
   //hypre_printf("### WARNING: Matrix is nearly singular! det = %e\n", det);
   /* printf("##----------------------------------------------\n");
      printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
      printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
      printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
      printf("##----------------------------------------------\n");
      getchar(); */
   //}

   det_inv = 1.0 / det;

   a[0] = M11 * det_inv;  a[1] = M12 * det_inv;  a[2] = M13 * det_inv;  a[3] = M14 * det_inv;
   a[4] = M21 * det_inv;  a[5] = M22 * det_inv;  a[6] = M23 * det_inv;  a[7] = M24 * det_inv;
   a[8] = M31 * det_inv;  a[9] = M32 * det_inv;  a[10] = M33 * det_inv; a[11] = M34 * det_inv;
   a[12] = M41 * det_inv; a[13] = M42 * det_inv; a[14] = M43 * det_inv; a[15] = M44 * det_inv;
}

/* In-place inverse of a dense row-major n x n matrix a via Gauss-Jordan
 * elimination WITHOUT pivoting (n == 4 is special-cased above).
 * NOTE(review): a zero (or tiny) diagonal entry a[k*n+k] divides by zero —
 * the guard is commented out below; callers must guarantee well-conditioned
 * diagonal blocks. */
void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n)
{
   HYPRE_Int i, j, k, l, u, kn, in;
   HYPRE_Real alinv;

   if (n == 4)
   {
      hypre_blas_smat_inv_n4(a);
   }
   else
   {
      for (k = 0; k < n; ++k)
      {
         kn = k * n;
         l = kn + k;
         //if (fabs(a[l]) < SMALLREAL) {
         //   printf("### WARNING: Diagonal entry is close to zero!");
         //   printf("### WARNING: diag_%d=%e\n", k, a[l]);
         //   a[l] = SMALLREAL;
         //}
         alinv = 1.0 / a[l];
         a[l] = alinv;

         /* scale pivot row (pivot column handled at the end) */
         for (j = 0; j < k; ++j)
         {
            u = kn + j;
            a[u] *= alinv;
         }
         for (j = k + 1; j < n; ++j)
         {
            u = kn + j;
            a[u] *= alinv;
         }

         /* eliminate pivot column from all other rows */
         for (i = 0; i < k; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j;
                  a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }
         for (i = k + 1; i < n; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j;
                  a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }

         /* finish the pivot column */
         for (i = 0; i < k; ++i)
         {
            u = i * n + k;
            a[u] *= -alinv;
         }
         for (i = k + 1; i < n; ++i)
         {
            u = i * n + k;
            a[u] *= -alinv;
         }
      } // end for (k=0; k<n; ++k)
   }// end if
}

/* Build B = block-diagonal approximation of A^{-1}: the inverses of A's
 * blk_size x blk_size diagonal sub-blocks, assembled as a ParCSR matrix
 * returned via *B_ptr. Also (re)computes mgr_data->diaginv through
 * hypre_blockRelax_setup. Returns 0 on success. */
HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr, void *mgr_vdata, HYPRE_Int debug_flag)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int blk_size = (mgr_data -> block_size);
   HYPRE_Int reserved_coarse_size
= (mgr_data -> reserved_coarse_size);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag;
   HYPRE_Real *B_diag_data;
   HYPRE_Int *B_diag_i;
   HYPRE_Int *B_diag_j;
   hypre_CSRMatrix *B_offd;
   HYPRE_Int i, ii;
   HYPRE_Int j, jj;
   HYPRE_Int k;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int n_block, left_size, inv_size;
   // HYPRE_Real wall_time;              /* for debugging instrumentation */
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Real * diaginv;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int block_scaling_error = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   //printf("n = %d\n",n);

   /* NOTE(review): my_id ranges over 0..num_procs-1, so this branch can
    * never be taken — presumably it was meant to exclude the reserved
    * coarse rows on the LAST rank (my_id == num_procs - 1); confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;
   //printf("inv_size = %d\n",inv_size);

   /* side effect: (re)computes mgr_data->diaginv for the relaxation path */
   hypre_blockRelax_setup(A, blk_size, reserved_coarse_size, &(mgr_data -> diaginv));

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of B and fill in
    * NOTE(review): only the n_block full blocks are written below; when
    * left_size > 0 the trailing rows keep B_diag_i == 0 from CTAlloc while
    * B_diag_i[n] = inv_size — verify leftover rows are handled by callers.
    *-----------------------------------------------------------------------*/
   B_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST);
   B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
   B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   B_diag_i[n] = inv_size;

   //B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   //B_offd_i[n] = 1;

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
   //printf("n_block = %d\n",n_block);
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;

      /* gather block i of A's diagonal into the dense scratch buffer */
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }

      /* for (k = 0;k < blk_size; k++) */
      /* { */
      /*    for (j = 0;j < blk_size; j++) */
      /*    { */
      /*       bidx = k*blk_size + j; */
      /*       printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */
      /*    } */
      /* } */

      /* invert the dense block and scatter it into B's CSR arrays */
      hypre_blas_mat_inv(diaginv, blk_size);

      for (k = 0; k < blk_size; k++)
      {
         B_diag_i[i * blk_size + k] = i * nb2 + k * blk_size;
         //B_offd_i[i*nb2+k] = 0;
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            B_diag_j[bidx] = i * blk_size + j;
            B_diag_data[bidx] = diaginv[k * blk_size + j];
         }
      }
   }

   //printf("Before create\n");
   B = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixGlobalNumCols(A),
                                hypre_ParCSRMatrixRowStarts(A),
                                hypre_ParCSRMatrixColStarts(A),
                                0,
                                inv_size,
                                0);
   //printf("After create\n");
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixData(B_diag) = B_diag_data;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;
   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixData(B_offd) = NULL;
   hypre_CSRMatrixI(B_offd) = NULL;
   hypre_CSRMatrixJ(B_offd) = NULL;

   *B_ptr = B;

   return (block_scaling_error);
}

/* One sweep of block relaxation: for each blk_size x blk_size diagonal
 * block, u += inv(diag block) * (f - A u~), where u~ is the previous
 * iterate (method 0/default: Jacobi) or the in-place-updated u (method 1:
 * Gauss-Seidel); off-processor couplings always use the previous iterate.
 * diaginv holds the precomputed block inverses (hypre_blockRelax_setup).
 * NOTE(review): blk_size is declared HYPRE_Real but used as a count —
 * presumably should be HYPRE_Int; confirm against the header. */
HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Int method, HYPRE_Real *diaginv, hypre_ParVector *Vtemp)
{
   MPI_Comm comm =
hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;       /* received off-processor u values */
   HYPRE_Real *v_buf_data;             /* send buffer for u values */
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;
   HYPRE_Real *res;                    /* dense residual of the current block */
   const HYPRE_Int nb2 = blk_size * blk_size;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* start the exchange of boundary u values (completed after the local
    * copy loop below, overlapping communication with computation) */
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* residual of block i: res = f - A u~ over the block's rows */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            if (method == 0)
            {
               // Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            else if (method == 1)
            {
               // Gauss-Seidel for diagonal part
               res[j] -= A_diag_data[jj] * u_data[ii];
            }
            else
            {
               // Default do Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            // always do Jacobi for off-diagonal part
            // (Vext_data is NULL in serial, but then A_offd has no entries)
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }

      /* u_block += inv(diag block) * res */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/* Block Gauss-Seidel sweep: identical structure to hypre_blockRelax_solve
 * but always uses the in-place-updated u for the local (diag) couplings.
 * NOTE(review): blk_size is declared HYPRE_Real but used as a count —
 * presumably should be HYPRE_Int; confirm against the header. */
HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;
   HYPRE_Real *res;
   const HYPRE_Int nb2 = blk_size * blk_size;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* exchange boundary u values (overlapped with the copy loop below) */
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            //res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]);
            /* Gauss-Seidel: use the already-updated u */
            res[j] -= A_diag_data[jj] * u_data[ii];
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }

      /* u_block += inv(diag block) * res */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/*Block smoother*/
/* Extract and invert all blk_size x blk_size diagonal sub-blocks of A
 * (plus one trailing left_size x left_size block for rows that don't fill
 * a whole block) into *diaginvptr, reallocating it as needed. Returns 1. */
HYPRE_Int hypre_blockRelax_setup(hypre_ParCSRMatrix *A, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Real **diaginvptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Int num_procs, my_id;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size, inv_size;
   HYPRE_Real *diaginv = *diaginvptr;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* NOTE(review): my_id == num_procs can never hold (ranks are
    * 0..num_procs-1) — presumably meant num_procs - 1; confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;

   /* (re)allocate the output buffer */
   if (diaginv != NULL)
   {
      hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }
   else
   {
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;
      //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1];
ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* trailing partial block (rows n_block*blk_size .. n-1) */
   for (i = 0; i < left_size; i++)
   {
      bidxm1 = n_block * nb2 + i * blk_size;
      bidxp1 = n_block * nb2 + (i + 1) * blk_size;
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * blk_size + j;
         diaginv[bidx] = 0.0;
      }
      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj > n_block * blk_size)
         {
            bidx = n_block * nb2 + i * blk_size + jj - n_block * blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* NOTE(review): the full blocks occupy diaginv[0 .. n_block*nb2), so
       * the leftover block's offset looks like it should be n_block * nb2,
       * not blk_size * nb2 — confirm before changing. */
      hypre_blas_mat_inv(diaginv + (HYPRE_Int)(blk_size * nb2), left_size);
   }
   else
   {
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   *diaginvptr = diaginv;

   return 1;
}

/* One block-relaxation sweep with the setup inlined: extracts and inverts
 * the diagonal sub-blocks into a local buffer, runs
 * hypre_blockRelax_solve, then frees the buffer. Ztemp is unused here. */
HYPRE_Int hypre_blockRelax(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int method, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_procs, my_id;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size, inv_size;
   HYPRE_Real *diaginv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* NOTE(review): my_id == num_procs can never hold — same suspected
    * off-by-one as in hypre_blockRelax_setup; confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;

   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;
      //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* trailing partial block */
   for (i = 0; i < left_size; i++)
   {
      bidxm1 = n_block * nb2 + i * blk_size;
      bidxp1 = n_block * nb2 + (i + 1) * blk_size;
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * blk_size + j;
         diaginv[bidx] = 0.0;
      }
      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj > n_block * blk_size)
         {
            bidx = n_block * nb2 + i * blk_size + jj - n_block * blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* NOTE(review): same suspected offset issue as in
       * hypre_blockRelax_setup — expected n_block * nb2; confirm. */
      hypre_blas_mat_inv(diaginv + (HYPRE_Int)(blk_size * nb2), left_size);
      /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */
   }
   else
   {
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   hypre_blockRelax_solve(A, f, u, blk_size, n_block, left_size, method, diaginv, Vtemp);

   /*-----------------------------------------------------------------
    * Free temperary memeory
    *-----------------------------------------------------------------*/
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/* set coarse grid solver */
/* Install a user-supplied F-relaxation solver (solve/setup callbacks plus
 * the solver object) into the MGR data; only level 0 is configurable.
 * fsolver_mode = 0 records that the F-solver came from the user. */
HYPRE_Int hypre_MGRSetFSolver( void *mgr_vdata, HYPRE_Int (*fine_grid_solver_solve)(void*, void*, void*, void*), HYPRE_Int (*fine_grid_solver_setup)(void*, void*, void*, void*), void *fsolver )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   HYPRE_Solver **aff_solver = (mgr_data -> aff_solver);

   if (aff_solver == NULL)
   {
      aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   }

   /* only allow to set F-solver for the first level */
   aff_solver[0] = (HYPRE_Solver *) fsolver;

   (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve;
   (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup;
   (mgr_data -> aff_solver) = aff_solver;
   (mgr_data -> fsolver_mode) = 0;

   return hypre_error_flag;
}

/* set coarse grid solver */
HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int (*coarse_grid_solver_solve)(void*, void*, void*, void*),
HYPRE_Int (*coarse_grid_solver_setup)(void*, void*, void*, void*), void *coarse_grid_solver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve; (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup; (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver; (mgr_data -> use_default_cgrid_solver) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> A_ff_inv) = A_ff_inv; return hypre_error_flag; } /* Set the maximum number of coarse levels. * maxcoarselevs = 1 yields the default 2-grid scheme. */ HYPRE_Int hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_num_coarse_levels) = maxcoarselevs; return hypre_error_flag; } /* Set the system block size */ HYPRE_Int hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> block_size) = bsize; return hypre_error_flag; } /* Set the relaxation type for the fine levels of the reduction. * Currently supports the following flavors of relaxation types * as described in the documentation: * relax_types 0 - 8, 13, 14, 18, 19, 98. * See par_relax.c and par_relax_more.c for more details. 
* */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method; } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (relax_method != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = 0; } } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/ 
HYPRE_Int hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> use_non_galerkin_cg) != NULL) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (cg_method != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = cg_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = 0; } } (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg; return hypre_error_flag; } /* Set the F-relaxation number of functions for each level */ HYPRE_Int hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_num_functions) != NULL) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (num_functions != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = num_functions[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = 1; } } (mgr_data -> Frelax_num_functions) = Frelax_num_functions; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data 
-> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (restrict_type != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = *(restrict_type + i); } } else { for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = 0; } } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = restrict_type; } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, 
HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = interpType; } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (interpType != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = *(interpType + i); } } else { for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = 2; } } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set the threshold to truncate the coarse grid at each * level of reduction */ HYPRE_Int hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> truncate_coarse_grid_threshold) = threshold; return hypre_error_flag; } /* Set print level for F-relaxation solver */ HYPRE_Int hypre_MGRSetFrelaxPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> frelax_print_level) = print_level; return hypre_error_flag; } /* Set print level for coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseGridPrintLevel( void *mgr_vdata, HYPRE_Int 
print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> cg_print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set logging level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr global smoother */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set global smoothing type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Set the maximum number of non-zero entries for restriction and interpolation operator if classical AMG interpolation is used */ HYPRE_Int hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> P_max_elmts) = P_max_elmts; return hypre_error_flag; } /* Get number of iterations for MGR solver */ 
HYPRE_Int
hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   *num_iterations = mgr_data->num_iterations;

   return hypre_error_flag;
}

/* Get residual norms for MGR solver */
HYPRE_Int
hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   *res_norm = mgr_data->final_rel_residual_norm;

   return hypre_error_flag;
}

/* Return the convergence factor recorded for the coarse grid solve. */
HYPRE_Int
hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata, HYPRE_Real *conv_factor )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   *conv_factor = (mgr_data -> cg_convergence_factor);

   return hypre_error_flag;
}

/* Build A_FF matrix from A given a CF_marker array */
/* Extract the ParCSR submatrix of A formed by the rows with
 * row_cf_marker[i] > 0 and the columns with col_cf_marker[j] > 0; the result
 * is returned in *A_block_ptr. Off-processor column markers are obtained by a
 * neighbor exchange over A's communication package. */
HYPRE_Int
hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A,
                      HYPRE_Int *row_cf_marker,
                      HYPRE_Int *col_cf_marker,
                      HYPRE_Int debug_flag,
                      hypre_ParCSRMatrix **A_block_ptr )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   /* local (diag) and off-processor (offd) CSR parts of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   hypre_IntArray *coarse_dof_func_ptr = NULL;
   /* [this rank's first selected index, next rank's first selected index] */
   HYPRE_BigInt num_row_cpts_global[2];
   HYPRE_BigInt num_col_cpts_global[2];

   hypre_ParCSRMatrix *Ablock;
   HYPRE_BigInt *col_map_offd_Ablock;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;

   hypre_CSRMatrix *Ablock_diag;
   hypre_CSRMatrix *Ablock_offd;
   HYPRE_Real *Ablock_diag_data;
   HYPRE_Int *Ablock_diag_i;
   HYPRE_Int *Ablock_diag_j;
   HYPRE_Real *Ablock_offd_data;
   HYPRE_Int *Ablock_offd_i;
   HYPRE_Int *Ablock_offd_j;
   HYPRE_Int Ablock_diag_size, Ablock_offd_size;
   HYPRE_Int *Ablock_marker;
   HYPRE_Int ii_counter;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *fine_to_coarse;
   HYPRE_Int *coarse_counter;
   HYPRE_Int *col_coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_row_cpts;
   HYPRE_BigInt total_global_col_cpts;
   HYPRE_Int num_cols_Ablock_offd;
   // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt;
   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   hypre_IntArray *wrap_cf;
   // HYPRE_Real wall_time;  /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //num_threads = hypre_NumThreads();
   // Temporary fix, disable threading
   // TODO: enable threading
   num_threads = 1;

   /* get the number of coarse rows */
   /* wrap_cf borrows the caller's marker array (data pointer assigned, not
    * copied). NOTE(review): wrap_cf itself is never destroyed below --
    * looks like a small host-memory leak; confirm against upstream. */
   wrap_cf = hypre_IntArrayCreate(local_numrows);
   hypre_IntArrayMemoryLocation(wrap_cf) = HYPRE_MEMORY_HOST;
   hypre_IntArrayData(wrap_cf) = row_cf_marker;
   hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf,
                              &coarse_dof_func_ptr, num_row_cpts_global);
   hypre_IntArrayDestroy(coarse_dof_func_ptr);
   coarse_dof_func_ptr = NULL;
   //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]);

   // my_first_row_cpt = num_row_cpts_global[0];
   /* the last rank holds the global number of selected rows; broadcast it */
   if (my_id == (num_procs - 1)) { total_global_row_cpts = num_row_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /* get the number of coarse columns (same wrapper, now over col_cf_marker) */
   hypre_IntArrayData(wrap_cf) = col_cf_marker;
   hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf,
                              &coarse_dof_func_ptr, num_col_cpts_global);
   hypre_IntArrayDestroy(coarse_dof_func_ptr);
   coarse_dof_func_ptr = NULL;
   //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]);

   // my_first_col_cpt = num_col_cpts_global[0];
   if (my_id == (num_procs - 1)) { total_global_col_cpts = num_col_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }
   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* pack the column markers of locally owned send rows, then exchange */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }
   /* job type 11 -- presumably the integer-exchange variant; confirm against
    * the comm-handle documentation */
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of Ablock and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* split the n_fine rows into num_threads contiguous chunks [ns, ne) */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;

      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a F-point, we loop through the columns and select
          *  the F-columns. Also set up mapping vector.
          *--------------------------------------------------------------------*/
         if (col_cf_marker[i] > 0)
         {
            /* selected column: assign its thread-local new column index */
            fine_to_coarse[i] = col_coarse_counter[j];
            col_coarse_counter[j]++;
         }
         if (row_cf_marker[i] > 0)
         {
            //fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
            /* count the kept diag entries of this selected row */
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (col_cf_marker[i1] > 0)
               {
                  jj_count[j]++;
               }
            }

            /* count the kept offd entries of this selected row */
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/
   /* prefix-sum the per-thread counters to obtain total sizes and offsets */
   for (i = 0; i < num_threads - 1; i++)
   {
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
      coarse_counter[i + 1] += coarse_counter[i];
      col_coarse_counter[i + 1] += col_coarse_counter[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   ii_counter = coarse_counter[i];

   Ablock_diag_size = jj_counter;

   Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location);
   Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, memory_location);
   Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, memory_location);

   Ablock_diag_i[ii_counter] = jj_counter;

   Ablock_offd_size = jj_counter_offd;

   Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location);
   Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, memory_location);
   Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, memory_location);

   /*-----------------------------------------------------------------------
    *  Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   //-----------------------------------------------------------------------
   //  Send and receive fine_to_coarse info.
   //-----------------------------------------------------------------------

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* shift each thread's local column numbering to a process-wide one */
      coarse_shift = 0;
      if (j > 0) { coarse_shift = col_coarse_counter[j - 1]; }
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt;

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Second pass: fill the diag/offd CSR arrays of the extracted block */
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      jj_counter = 0;
      if (jl > 0) { jj_counter = jj_count[jl - 1]; }
      jj_counter_offd = 0;
      if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }
      ii_counter = 0;
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a F-point, we loop through the columns and select
          *  the F-columns. Also set up mapping vector.
          *--------------------------------------------------------------------*/
         if (row_cf_marker[i] > 0)
         {
            // Diagonal part of Ablock //
            Ablock_diag_i[ii_counter] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (col_cf_marker[i1] > 0)
               {
                  Ablock_diag_j[jj_counter] = fine_to_coarse[i1];
                  Ablock_diag_data[jj_counter] = A_diag_data[jj];
                  jj_counter++;
               }
            }

            // Off-Diagonal part of Ablock //
            Ablock_offd_i[ii_counter] = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     /* offd columns keep A's local offd index for now; they
                      * are compressed to Ablock-local indices further below */
                     Ablock_offd_j[jj_counter_offd] = i1;
                     Ablock_offd_data[jj_counter_offd] = A_offd_data[jj];
                     jj_counter_offd++;
                  }
               }
            }
            ii_counter++;
         }
      }
      Ablock_offd_i[ii_counter] = jj_counter_offd;
      Ablock_diag_i[ii_counter] = jj_counter;
   }
   Ablock = hypre_ParCSRMatrixCreate(comm,
                                     total_global_row_cpts,
                                     total_global_col_cpts,
                                     num_row_cpts_global,
                                     num_col_cpts_global,
                                     0,
                                     Ablock_diag_i[ii_counter],
                                     Ablock_offd_i[ii_counter]);

   Ablock_diag = hypre_ParCSRMatrixDiag(Ablock);
   hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data;
   hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i;
   hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j;
   Ablock_offd = hypre_ParCSRMatrixOffd(Ablock);
   hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data;
   hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i;
   hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j;

   num_cols_Ablock_offd = 0;

   if (Ablock_offd_size)
   {
      /* mark which offd columns of A actually appear in the extracted block */
      Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < num_cols_A_offd; i++)
      {
         Ablock_marker[i] = 0;
      }

      num_cols_Ablock_offd = 0;
      for (i = 0; i < Ablock_offd_size; i++)
      {
         index = Ablock_offd_j[i];
         if (!Ablock_marker[index])
         {
            num_cols_Ablock_offd++;
            Ablock_marker[index] = 1;
         }
      }

      col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, memory_location);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[i] = i-th used offd column, in ascending A-local order */
      index = 0;
      for (i = 0; i < num_cols_Ablock_offd; i++)
      {
         while (Ablock_marker[index] == 0) { index++; }
         tmp_map_offd[i] = index++;
      }

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* compress Ablock's offd column indices to the new local numbering */
      for (i = 0; i < Ablock_offd_size; i++)
         Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                               Ablock_offd_j[i],
                                               num_cols_Ablock_offd);
      hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_Ablock_offd)
   {
      hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock;
      hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd);

   /* Create the assumed partition */
   if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL)
   {
      hypre_ParCSRMatrixCreateAssumedPartition(Ablock);
   }

   *A_block_ptr = Ablock;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return (0);
}

/* Build A_FF matrix from A given a CF_marker array */
HYPRE_Int
hypre_MGRBuildAff( hypre_ParCSRMatrix *A,
                   HYPRE_Int *CF_marker,
                   HYPRE_Int debug_flag,
                   hypre_ParCSRMatrix **A_ff_ptr )
{
   HYPRE_Int i;
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   /* create a copy of the CF_marker array and switch C-points to F-points
    * (negation makes F-point entries positive, which is what
    * hypre_MGRGetSubBlock selects on) */
   HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < local_numrows; i++)
   {
      CF_marker_copy[i] = -CF_marker[i];
   }

   hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr);

   /* Free copy of CF marker */
   hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
   return (0);
}
/********************************************************************************* * This routine assumes that the 'toVector' is larger than the 'fromVector' and * the CF_marker is of the same length as the toVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'fromVector'. * It adds the values of the 'fromVector' to the 'toVector' where the marker is the * same as the 'point_type' *********************************************************************************/ HYPRE_Int hypre_MGRAddVectorP ( hypre_IntArray *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int *CF_marker_data = hypre_IntArrayData(CF_marker); //HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int n = hypre_IntArraySize(CF_marker); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker_data[i] == point_type) { toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j]; j++; } } return 0; } /************************************************************************************* * This routine assumes that the 'fromVector' is larger than the 'toVector' and * the CF_marker is of the same length as the fromVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'toVector'. 
* It adds the values of the 'fromVector' where the marker is the * same as the 'point_type' to the 'toVector' *************************************************************************************/ HYPRE_Int hypre_MGRAddVectorR ( hypre_IntArray *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int *CF_marker_data = hypre_IntArrayData(CF_marker); //HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int n = hypre_IntArraySize(CF_marker); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker_data[i] == point_type) { toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i]; j++; } } return 0; } /* HYPRE_Int hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } */ /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = 
(hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " MGR RHS array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i, j; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); hypre_printf("MGR Setup parameters: \n"); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F)); hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method)); for (i = 0; i < max_num_coarse_levels; i++) { hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]); hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]); hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]); hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]); HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i]; hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points); hypre_printf("Cpoints indices: "); for (j = 0; j < lvl_num_coarse_points; j++) { if ((mgr_data -> block_cf_marker)[i][j] == 1) { hypre_printf("%d ", j); } } hypre_printf("\n"); } hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints)); 
hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver)); if ((mgr_data -> fsolver_mode) >= 0) { hypre_printf("Use AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> fsolver_mode)); } return hypre_error_flag; } #ifdef HYPRE_USING_DSUPERLU void * hypre_MGRDirectSolverCreate() { hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST); return (void *) dslu_data; } HYPRE_Int hypre_MGRDirectSolverSetup( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { /* Par Data Structure variables */ HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_local; HYPRE_Int num_rows; HYPRE_Int num_procs, my_id; HYPRE_Int pcols = 1, prows = 1; HYPRE_BigInt *big_rowptr = NULL; hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver; HYPRE_Int info = 0; HYPRE_Int nrhs = 0; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* Merge diag and offd into one matrix (global ids) */ A_local = hypre_MergeDiagAndOffd(A); num_rows = hypre_CSRMatrixNumRows(A_local); /* Now convert hypre matrix to a SuperMatrix */ #ifdef HYPRE_MIXEDINT { HYPRE_Int *rowptr = NULL; HYPRE_Int i; rowptr = hypre_CSRMatrixI(A_local); big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows + 1), HYPRE_MEMORY_HOST); for (i = 0; i < (num_rows + 1); i++) { 
big_rowptr[i] = (HYPRE_BigInt)rowptr[i]; } } #else big_rowptr = hypre_CSRMatrixI(A_local); #endif dCreate_CompRowLoc_Matrix_dist( &(dslu_data->A_dslu), global_num_rows, global_num_rows, hypre_CSRMatrixNumNonzeros(A_local), num_rows, hypre_ParCSRMatrixFirstRowIndex(A), hypre_CSRMatrixData(A_local), hypre_CSRMatrixBigJ(A_local), big_rowptr, SLU_NR_loc, SLU_D, SLU_GE); /* DOK: SuperLU frees assigned data, so set them to null before * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors. */ #ifndef HYPRE_MIXEDINT hypre_CSRMatrixI(A_local) = NULL; #endif hypre_CSRMatrixData(A_local) = NULL; hypre_CSRMatrixBigJ(A_local) = NULL; hypre_CSRMatrixDestroy(A_local); /*Create process grid */ while (prows * pcols <= num_procs) { ++prows; } --prows; pcols = num_procs / prows; while (prows * pcols != num_procs) { prows -= 1; pcols = num_procs / prows; } //hypre_printf(" prows %d pcols %d\n", prows, pcols); superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid)); set_default_options_dist(&(dslu_data->dslu_options)); dslu_data->dslu_options.Fact = DOFACT; dslu_data->dslu_options.PrintStat = NO; /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE; dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A; dslu_data->dslu_options.DiagPivotThresh = 1.0; dslu_data->dslu_options.ReplaceTinyPivot = NO; */ dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct)); dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU)); PStatInit(&(dslu_data->dslu_data_stat)); dslu_data->global_num_rows = global_num_rows; dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); dslu_data->berr[0] = 0.0; pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu), &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs, &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU), &(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info); dslu_data->dslu_options.Fact = FACTORED; return hypre_error_flag; } HYPRE_Int 
/* Solve with the SuperLU_DIST factorization held inside 'solver'. Delegates to
 * hypre_SLUDistSolve(solver, f, u). NOTE: the matrix argument 'A' is not used
 * here; it is kept for interface symmetry with other solve routines (the
 * factorization was computed in hypre_MGRDirectSolverSetup). */
hypre_MGRDirectSolverSolve( void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *f,
                            hypre_ParVector *u )
{
   hypre_SLUDistSolve(solver, f, u);

   return hypre_error_flag;
}

/* Release the SuperLU_DIST data held by 'solver' via hypre_SLUDistDestroy. */
HYPRE_Int
hypre_MGRDirectSolverDestroy( void *solver )
{
   hypre_SLUDistDestroy(solver);

   return hypre_error_flag;
}
#endif
GB_split_bitmap_template.c
//------------------------------------------------------------------------------ // GB_split_bitmap_template: split a bitmap matrix into a bitmap tile //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { //-------------------------------------------------------------------------- // get C and the tile A //-------------------------------------------------------------------------- #ifndef GB_ISO_SPLIT const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ; GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; #endif int64_t pC ; #pragma omp parallel for num_threads(C_nthreads) schedule(static) \ reduction(+:cnz) for (pC = 0 ; pC < cnzmax ; pC++) { int64_t i = pC % cvlen ; int64_t j = pC / cvlen ; int64_t iA = aistart + i ; int64_t jA = avstart + j ; int64_t pA = iA + jA * avlen ; Cb [pC] = Ab [pA] ; if (Ab [pA]) { // Cx [pC] = Ax [pA] ; GB_COPY (pC, pA) ; cnz++ ; } } done = true ; } #undef GB_CTYPE #undef GB_ISO_SPLIT
fig310-mxv-omp.c
/*
   DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.

   Copyright 2009 Sun Microsystems, Inc. All rights reserved.

   The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
   You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt

   The BSD License

   Redistribution and use in source and binary forms, with or without modification, are permitted
   provided that the following conditions are met:

     * Redistribution of source code must retain the above copyright notice, this list of conditions
       and the following disclaimer.
     * Redistribution in binary form must reproduce the above copyright notice, this list of
       conditions and the following disclaimer in the documentation and/or other materials provided
       with the distribution.
     * Neither the name of Sun Microsystems, Inc. or the names of contributors may be used to endorse
       or promote products derived from this software without specific prior written permission.

   This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS OR IMPLIED
   CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC.
   ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF
   USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS
   LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL,
   CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF
   LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN
   ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

   You acknowledge that this software is not designed, licensed or intended for use in the design,
   construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define M 10
#define N 10

void mxv(int m, int n, double * restrict a, double * restrict b,
         double * restrict c);

/* Allocate nbytes or terminate with a diagnostic. The original code only
 * called perror() on allocation failure and then went on to dereference the
 * NULL pointer; exiting here prevents that undefined behavior. */
static double *checked_malloc(size_t nbytes, const char *what)
{
   double *p = (double *) malloc(nbytes);
   if (p == NULL)
   {
      perror(what);
      exit(EXIT_FAILURE);
   }
   return p;
}

/* Driver: builds an M x N matrix b (row-major, b[i][j] = i) and a vector c of
 * all 2.0, then computes a = b*c with the OpenMP-parallel mxv(). */
int main(int argc, char *argv[])
{
   double *a, *b, *c;
   int i, j, m, n;

   /* REPLACED WITH defines
      printf("Please give m and n: ");
      scanf("%d %d",&m,&n);
      printf("\n");
   */
   m = M;
   n = N;

   a = checked_malloc((size_t) m * sizeof(double), "memory allocation for a");
   b = checked_malloc((size_t) m * (size_t) n * sizeof(double), "memory allocation for b");
   c = checked_malloc((size_t) n * sizeof(double), "memory allocation for c");

   printf("Initializing matrix B and vector c\n");
   for (j = 0; j < n; j++)
      c[j] = 2.0;
   for (i = 0; i < m; i++)
      for (j = 0; j < n; j++)
         b[i * n + j] = i;

   printf("Executing mxv function for m = %d n = %d\n", m, n);
   (void) mxv(m, n, a, b, c);

   free(a);
   free(b);
   free(c);
   return (0);
}

/* Dense matrix-vector product a = b*c, where b is an m x n row-major matrix.
 * Rows are independent, so the outer loop is parallelized with OpenMP. */
void mxv(int m, int n, double * restrict a, double * restrict b,
         double * restrict c)
{
   int i, j;

   #pragma omp parallel for default(none) \
      shared(m,n,a,b,c) private(i,j)
   for (i = 0; i < m; i++)
   {
      a[i] = 0.0;
      for (j = 0; j < n; j++)
         a[i] += b[i * n + j] * c[j];
   }  /*-- End of omp parallel for --*/
}
ptmp.c
/*
 * Copyright (c) 2016-2018 Ilya Kaliman
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <err.h>
#ifdef LIBPT_USE_MPI
#include <mpi.h>
#endif
#include "pt.h"

/* Allocator hooks supplied by the embedding application. */
extern void *(*libpt_malloc)(size_t);
extern void (*libpt_free)(void *);

/* Fortran BLAS single-precision GEMM (trailing-underscore convention). */
void sgemm_(char *, char *, int *, int *, int *, float *, float *, int *,
    float *, int *, float *, float *, int *);

/* Thin C wrapper around sgemm_: converts by-value arguments to the
 * by-reference Fortran calling convention. The casts drop const because the
 * Fortran prototype has no const, but a/b are only read. */
static void
gemm(char transa, char transb, int m, int n, int k, float alpha,
    const float *a, int lda, const float *b, int ldb, float beta,
    float *c, int ldc)
{
	sgemm_(&transa, &transb, &m, &n, &k, &alpha, (float *)a, &lda,
	    (float *)b, &ldb, &beta, c, &ldc);
}

/* Contract t2 with the i_ovvv integrals for fixed occupied indices (i,j,k).
 * The "half" variant works with i_ovvv stored in packed form over the
 * antisymmetric virtual pair (b<c), hence the v*(v-1)/2 dimensions.
 * Result abc has v*(v-1)/2 * v entries. */
static void
t2_i_ovvv_half(size_t o, size_t v, size_t i, size_t j, size_t k, float *abc,
    const float *t2, const float *i_ovvv)
{
	/* Slices of t2 at (i,j) and of packed i_ovvv at k. */
	const float *t2_p = &t2[i*o*v*v+j*v*v];
	const float *i_ovvv_p = &i_ovvv[k*v*v*(v-1)/2];

	/* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
	gemm('T', 'T', v, v*(v-1)/2, v, 1.0, t2_p, v, i_ovvv_p,
	    v*(v-1)/2, 0.0, abc, v);
}

/* Mixed-spin (baba) variant of the contraction above, using packed
 * same-spin (aaaa) i_ovvv integrals; dimensions mix va and vb. */
static void
t2_baba_i_ovvv_aaaa_half(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, float *abc, const float *t2,
    const float *i_ovvv)
{
	const float *t2_p = &t2[i*oa*vb*va+j*vb*va];
	const float *i_ovvv_p = &i_ovvv[k*va*va*(va-1)/2];

	(void)ob; /* unused */

	/* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d),
i_ovvv(k,d,b,c)) */ gemm('T', 'T', vb, va*(va-1)/2, va, 1.0, t2_p, va, i_ovvv_p, va*(va-1)/2, 0.0, abc, vb); } static void t2_aaaa_i_ovvv_baba(size_t oa, size_t va, size_t ob, size_t vb, size_t i, size_t j, size_t k, float *abc, const float *t2, const float *i_ovvv) { const float *t2_p = &t2[i*oa*va*va+j*va*va]; const float *i_ovvv_p = &i_ovvv[k*va*vb*va]; (void)ob; /* unused */ /* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */ gemm('T', 'T', va, va*vb, va, 1.0, t2_p, va, i_ovvv_p, va*vb, 0.0, abc, va); } static void t2_abab_i_ovvv_abab(size_t oa, size_t va, size_t ob, size_t vb, size_t i, size_t j, size_t k, float *abc, const float *t2, const float *i_ovvv) { const float *t2_p = &t2[i*ob*va*vb+j*va*vb]; const float *i_ovvv_p = &i_ovvv[k*vb*va*vb]; (void)oa; /* unused */ /* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */ gemm('T', 'T', va, va*vb, vb, 1.0, t2_p, vb, i_ovvv_p, va*vb, 0.0, abc, va); } static void t2_i_oovo(size_t o, size_t v, size_t i, size_t j, size_t k, float *abc, const float *t2, const float *i_oovo) { const float *t2_p = &t2[i*o*v*v]; const float *i_oovo_p = &i_oovo[j*o*o*v+k*o*v]; /* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */ gemm('N', 'N', v*v, v, o, 1.0, t2_p, v*v, i_oovo_p, o, 0.0, abc, v*v); } static void t2_aaaa_i_oovo_baba(size_t oa, size_t va, size_t ob, size_t vb, size_t i, size_t j, size_t k, float *abc, const float *t2, const float *i_oovo) { const float *t2_p = &t2[i*oa*va*va]; const float *i_oovo_p = &i_oovo[j*oa*vb*oa+k*vb*oa]; (void)ob; /* unused */ /* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */ gemm('N', 'N', va*va, vb, oa, 1.0, t2_p, va*va, i_oovo_p, oa, 0.0, abc, va*va); } static void t2_abab_i_oovo_abab(size_t oa, size_t va, size_t ob, size_t vb, size_t i, size_t j, size_t k, float *abc, const float *t2, const float *i_oovo) { const float *t2_p = &t2[i*ob*va*vb]; const float *i_oovo_p = &i_oovo[j*ob*va*ob+k*va*ob]; (void)oa; /* unused */ /* 
out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */ gemm('N', 'N', va*vb, va, ob, 1.0, t2_p, va*vb, i_oovo_p, ob, 0.0, abc, va*vb); } static void t2_baba_i_oovo_aaaa(size_t oa, size_t va, size_t ob, size_t vb, size_t i, size_t j, size_t k, float *abc, const float *t2, const float *i_oovo) { const float *t2_p = &t2[i*oa*vb*va]; const float *i_oovo_p = &i_oovo[j*oa*va*oa+k*va*oa]; (void)ob; /* unused */ /* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */ gemm('N', 'N', va*vb, va, oa, 1.0, t2_p, va*vb, i_oovo_p, oa, 0.0, abc, va*vb); } static float i_jk_a_bc_ov_oovv(size_t o, size_t v, const float *ov, const float *oovv, size_t i, size_t j, size_t k, size_t a, size_t b, size_t c) { return +ov[i*v+a]*oovv[j*o*v*v+k*v*v+b*v+c] -ov[j*v+a]*oovv[i*o*v*v+k*v*v+b*v+c] -ov[k*v+a]*oovv[j*o*v*v+i*v*v+b*v+c] -ov[i*v+b]*oovv[j*o*v*v+k*v*v+a*v+c] +ov[j*v+b]*oovv[i*o*v*v+k*v*v+a*v+c] +ov[k*v+b]*oovv[j*o*v*v+i*v*v+a*v+c] -ov[i*v+c]*oovv[j*o*v*v+k*v*v+b*v+a] +ov[j*v+c]*oovv[i*o*v*v+k*v*v+b*v+a] +ov[k*v+c]*oovv[j*o*v*v+i*v*v+b*v+a]; } static float comp_t3b_ijkabc(size_t v1, size_t o2, size_t v2a, size_t v2b, size_t i, size_t j, size_t k, size_t a, size_t b, size_t c, const float *t1, const float *i_oovv, const float *f_ov, const float *t2) { return t1[i*v1+a] * i_oovv[j*o2*v2a*v2b+k*v2a*v2b+b*v2b+c] + f_ov[i*v1+a] * t2[j*o2*v2a*v2b+k*v2a*v2b+b*v2b+c]; } static double cc_pt_aaa(size_t oa, size_t va, const float *d_ov, const float *f_ov, const float *t1, const float *t2_aaaa, const float *i_oovo_aaaa, const float *i_oovv_aaaa, const float *i_ovvv_aaaa) { double e_pt = 0.0; int rank = 0, size = 1; if (oa == 0 || va == 0) return 0.0; #ifdef LIBPT_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); #endif #ifdef _OPENMP #pragma omp parallel #endif { size_t i, j, k, a, b, c, it, *ijk, nijk = 0; float *t3ax1, *abc1; if ((ijk = libpt_malloc(oa*oa*oa*sizeof(*ijk))) == NULL) err(1, "libpt malloc ijk"); for (i = 0, it = 0; i < oa; i++) { 
for (j = i+1; j < oa; j++) { for (k = j+1; k < oa; k++, it++) { if ((int)it % size == rank) { ijk[3*nijk+0] = i; ijk[3*nijk+1] = j; ijk[3*nijk+2] = k; nijk++; } } } } if ((t3ax1 = libpt_malloc(2*va*va*va*sizeof(*t3ax1))) == NULL) err(1, "libpt malloc work"); abc1 = t3ax1 + va*va*va; #ifdef _OPENMP #pragma omp for reduction(+:e_pt) schedule(dynamic) #endif for (it = 0; it < nijk; it++) { i = ijk[3*it+0]; j = ijk[3*it+1]; k = ijk[3*it+2]; t2_i_ovvv_half(oa,va,i,j,k,abc1,t2_aaaa,i_ovvv_aaaa); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++) t3ax1[a*va*va+b*va+c] = +abc1[a*(a-1)/2*va+b*va+c] -abc1[a*(a-1)/2*va+c*va+b] +abc1[b*(b-1)/2*va+c*va+a]; t2_i_ovvv_half(oa,va,i,k,j,abc1,t2_aaaa,i_ovvv_aaaa); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++) t3ax1[a*va*va+b*va+c] += -abc1[a*(a-1)/2*va+b*va+c] +abc1[a*(a-1)/2*va+c*va+b] -abc1[b*(b-1)/2*va+c*va+a]; t2_i_ovvv_half(oa,va,k,j,i,abc1,t2_aaaa,i_ovvv_aaaa); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++) t3ax1[a*va*va+b*va+c] += -abc1[a*(a-1)/2*va+b*va+c] +abc1[a*(a-1)/2*va+c*va+b] -abc1[b*(b-1)/2*va+c*va+a]; t2_i_oovo(oa,va,i,j,k,abc1,t2_aaaa,i_oovo_aaaa); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++) t3ax1[a*va*va+b*va+c] += +abc1[a*va*va+b*va+c] -abc1[b*va*va+a*va+c] -abc1[c*va*va+b*va+a]; t2_i_oovo(oa,va,j,i,k,abc1,t2_aaaa,i_oovo_aaaa); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++) t3ax1[a*va*va+b*va+c] += -abc1[a*va*va+b*va+c] +abc1[b*va*va+a*va+c] +abc1[c*va*va+b*va+a]; t2_i_oovo(oa,va,k,j,i,abc1,t2_aaaa,i_oovo_aaaa); for (a = 0; a < va; a++) { for (b = 0; b < a; b++) { for (c = 0; c < b; c++) { float t3ax, t3bx, dn; t3ax1[a*va*va+b*va+c] += -abc1[a*va*va+b*va+c] +abc1[b*va*va+a*va+c] +abc1[c*va*va+b*va+a]; dn = d_ov[i*va+a] + d_ov[j*va+b] + d_ov[k*va+c]; t3ax = t3ax1[a*va*va+b*va+c]; t3bx = +i_jk_a_bc_ov_oovv(oa,va,t1,i_oovv_aaaa,i,j,k,a,b,c) 
+i_jk_a_bc_ov_oovv(oa,va,f_ov,t2_aaaa,i,j,k,a,b,c); e_pt += t3ax * (t3ax-t3bx) / dn; }}} } libpt_free(ijk); libpt_free(t3ax1); } return (e_pt); } static double cc_pt_aab(size_t oa, size_t va, size_t ob, size_t vb, const float *d_ov_aa, const float *d_ov_bb, const float *f_ov_aa, const float *f_ov_bb, const float *t1_aa, const float *t1_bb, const float *t2_aaaa, const float *t2_abab, const float *t2_baba, const float *i_oovo_aaaa, const float *i_oovo_abab, const float *i_oovo_baba, const float *i_oovv_aaaa, const float *i_oovv_abab, const float *i_ovvv_aaaa, const float *i_ovvv_abab, const float *i_ovvv_baba) { double e_pt = 0.0; int rank = 0, size = 1; if (oa == 0 || va == 0 || ob == 0 || vb == 0) return 0.0; #ifdef LIBPT_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); #endif #ifdef _OPENMP #pragma omp parallel #endif { size_t i, j, k, a, b, c, it, *ijk, nijk = 0; float *t3ax1, *abc1, *abc11, *abc12; if ((ijk = libpt_malloc(2*oa*oa*ob*sizeof(*ijk))) == NULL) err(1, "libpt malloc ijk"); for (i = 0, it = 0; i < oa; i++) { for (j = i+1; j < oa; j++) { for (k = 0; k < ob; k++, it++) { if ((int)it % size == rank) { ijk[3*nijk+0] = i; ijk[3*nijk+1] = j; ijk[3*nijk+2] = k; nijk++; } } } } if ((t3ax1 = libpt_malloc(2*va*va*vb*sizeof(*t3ax1))) == NULL) err(1, "libpt malloc work"); abc1 = t3ax1 + va*va*vb; abc11 = t3ax1 + va*va*vb; abc12 = t3ax1 + va*va*vb + vb*va*(va-1)/2; #ifdef _OPENMP #pragma omp for reduction(+:e_pt) schedule(dynamic) #endif for (it = 0; it < nijk; it++) { i = ijk[3*it+0]; j = ijk[3*it+1]; k = ijk[3*it+2]; t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,t2_aaaa,i_ovvv_baba); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < vb; c++) t3ax1[a*va*vb+b*vb+c] = -abc1[a+b*va+c*va*va] +abc1[b+a*va+c*va*va]; t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,t2_abab,i_ovvv_abab); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < vb; c++) t3ax1[a*va*vb+b*vb+c] += -abc1[a+c*va+b*va*vb] +abc1[b+c*va+a*va*vb]; 
t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,t2_abab,i_ovvv_abab); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < vb; c++) t3ax1[a*va*vb+b*vb+c] += +abc1[a+c*va+b*va*vb] -abc1[b+c*va+a*va*vb]; t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,t2_baba,i_ovvv_aaaa); t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,t2_baba,i_ovvv_aaaa); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < vb; c++) t3ax1[a*va*vb+b*vb+c] += -abc11[c+vb*a*(a-1)/2+vb*b] +abc12[c+vb*a*(a-1)/2+vb*b]; t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,t2_aaaa,i_oovo_baba); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < vb; c++) t3ax1[a*va*vb+b*vb+c] += -abc1[b+a*va+c*va*va]; t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,t2_aaaa,i_oovo_baba); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < vb; c++) t3ax1[a*va*vb+b*vb+c] += +abc1[b+a*va+c*va*va]; t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,t2_abab,i_oovo_abab); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < vb; c++) t3ax1[a*va*vb+b*vb+c] += -abc1[c+a*vb+b*vb*va] +abc1[c+b*vb+a*vb*va]; t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,t2_abab,i_oovo_abab); for (a = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < vb; c++) t3ax1[a*va*vb+b*vb+c] += -abc1[c+b*vb+a*vb*va] +abc1[c+a*vb+b*vb*va]; t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,t2_baba,i_oovo_aaaa); for (a = 0; a < va; a++) { for (b = 0; b < a; b++) { for (c = 0; c < vb; c++) { float t3ax, t3bx, dn; t3ax1[a*va*vb+b*vb+c] += -abc1[a+c*va+b*va*vb] +abc1[b+c*va+a*va*vb]; t3bx = +comp_t3b_ijkabc(va,ob,va,vb,i,j,k,a,b,c, t1_aa,i_oovv_abab,f_ov_aa,t2_abab) -comp_t3b_ijkabc(va,ob,va,vb,i,j,k,b,a,c, t1_aa,i_oovv_abab,f_ov_aa,t2_abab) -comp_t3b_ijkabc(va,ob,va,vb,j,i,k,a,b,c, t1_aa,i_oovv_abab,f_ov_aa,t2_abab) +comp_t3b_ijkabc(va,ob,va,vb,j,i,k,b,a,c, t1_aa,i_oovv_abab,f_ov_aa,t2_abab) +comp_t3b_ijkabc(vb,oa,va,va,k,j,i,c,b,a, t1_bb,i_oovv_aaaa,f_ov_bb,t2_aaaa); dn = d_ov_aa[i*va+a] + d_ov_aa[j*va+b] + d_ov_bb[k*vb+c]; t3ax = 
t3ax1[a*va*vb+b*vb+c]; e_pt += t3ax * (t3ax-t3bx) / dn; }}} } libpt_free(ijk); libpt_free(t3ax1); } return (e_pt); } double libpt_rpt_mp(size_t oa, size_t va, const float *d_ov, const float *f_ov, const float *t1, const float *t2, const float *i_oovo, const float *i_oovv, const float *i_ovvv) { double e_pt = 0.0; const float *t2_aaaa = t2; const float *t2_abab = t2 + oa*oa*va*va; const float *i_ovvv_aaaa = i_ovvv; const float *i_ovvv_abab = i_ovvv + oa*va*va*(va-1)/2; const float *i_oovo_aaaa = i_oovo; const float *i_oovo_abab = i_oovo + oa*oa*oa*va; const float *i_oovv_aaaa = i_oovv; const float *i_oovv_abab = i_oovv + oa*oa*va*va; e_pt += cc_pt_aaa(oa, va, d_ov, f_ov, t1, t2_aaaa, i_oovo_aaaa, i_oovv_aaaa, i_ovvv_aaaa); e_pt += cc_pt_aab(oa, va, oa, va, d_ov, d_ov, f_ov, f_ov, t1, t1, t2_aaaa, t2_abab, t2_abab, i_oovo_aaaa, i_oovo_abab, i_oovo_abab, i_oovv_aaaa, i_oovv_abab, i_ovvv_aaaa, i_ovvv_abab, i_ovvv_abab); #ifdef LIBPT_USE_MPI MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #endif return 2.0 * e_pt; } double libpt_upt_mp(size_t oa, size_t va, size_t ob, size_t vb, const float *d_ov, const float *f_ov, const float *t1, const float *t2, const float *i_oovo, const float *i_oovv, const float *i_ovvv) { double e_pt = 0.0; const float *d_ov_aa = d_ov; const float *d_ov_bb = d_ov_aa + oa*va; const float *f_ov_aa = f_ov; const float *f_ov_bb = f_ov_aa + oa*va; const float *t1_aa = t1; const float *t1_bb = t1_aa + oa*va; const float *t2_aaaa = t2; const float *t2_abab = t2_aaaa + oa*oa*va*va; const float *t2_bbbb = t2_abab + oa*ob*va*vb; const float *t2_baba = t2_bbbb + ob*ob*vb*vb; const float *i_oovo_aaaa = i_oovo; const float *i_oovo_abab = i_oovo_aaaa + oa*oa*va*oa; const float *i_oovo_bbbb = i_oovo_abab + oa*ob*va*ob; const float *i_oovo_baba = i_oovo_bbbb + ob*ob*vb*ob; const float *i_oovv_aaaa = i_oovv; const float *i_oovv_abab = i_oovv_aaaa + oa*oa*va*va; const float *i_oovv_bbbb = i_oovv_abab + oa*ob*va*vb; const float 
*i_oovv_baba = i_oovv_bbbb + ob*ob*vb*vb; const float *i_ovvv_aaaa = i_ovvv; const float *i_ovvv_abab = i_ovvv_aaaa + oa*va*va*(va-1)/2; const float *i_ovvv_bbbb = i_ovvv_abab + oa*vb*va*vb; const float *i_ovvv_baba = i_ovvv_bbbb + ob*vb*vb*(vb-1)/2; /* aaaaaa */ e_pt += cc_pt_aaa(oa, va, d_ov_aa, f_ov_aa, t1_aa, t2_aaaa, i_oovo_aaaa, i_oovv_aaaa, i_ovvv_aaaa); /* bbbbbb */ e_pt += cc_pt_aaa(ob, vb, d_ov_bb, f_ov_bb, t1_bb, t2_bbbb, i_oovo_bbbb, i_oovv_bbbb, i_ovvv_bbbb); /* aabaab */ e_pt += cc_pt_aab(oa, va, ob, vb, d_ov_aa, d_ov_bb, f_ov_aa, f_ov_bb, t1_aa, t1_bb, t2_aaaa, t2_abab, t2_baba, i_oovo_aaaa, i_oovo_abab, i_oovo_baba, i_oovv_aaaa, i_oovv_abab, i_ovvv_aaaa, i_ovvv_abab, i_ovvv_baba); /* bbabba */ e_pt += cc_pt_aab(ob, vb, oa, va, d_ov_bb, d_ov_aa, f_ov_bb, f_ov_aa, t1_bb, t1_aa, t2_bbbb, t2_baba, t2_abab, i_oovo_bbbb, i_oovo_baba, i_oovo_abab, i_oovv_bbbb, i_oovv_baba, i_ovvv_bbbb, i_ovvv_baba, i_ovvv_abab); #ifdef LIBPT_USE_MPI MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #endif return e_pt; } static double cc_ft_aaa(size_t oa, size_t va, const float *d_ov, const float *f2_ov, const float *l1, const float *t2, const float *l2, const float *i_oovv, const float *i2_t2f2_oovo, const float *i3_ovvv, const float *i6_oovo, const float *i7_ovvv) { double e_pt = 0.0; int rank = 0, size = 1; if (oa == 0 || va == 0) return 0.0; #ifdef LIBPT_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); #endif #ifdef _OPENMP #pragma omp parallel #endif { size_t i, j, k, a, b, c, t, it, *ijk, nijk = 0; float *sigvvvl, *sigvvvr, *abc1; if ((ijk = libpt_malloc(oa*oa*oa*sizeof(*ijk))) == NULL) err(1, "libpt malloc ijk"); for (i = 0, it = 0; i < oa; i++) { for (j = i+1; j < oa; j++) { for (k = j+1; k < oa; k++, it++) { if ((int)it % size == rank) { ijk[3*nijk+0] = i; ijk[3*nijk+1] = j; ijk[3*nijk+2] = k; nijk++; } } } } if ((sigvvvl = libpt_malloc(2*va*va*va*sizeof(*sigvvvl))) == NULL) err(1, "libpt malloc work"); 
sigvvvr = sigvvvl + va*va*(va-1)/2; abc1 = sigvvvl + va*va*va; #ifdef _OPENMP #pragma omp for reduction(+:e_pt) schedule(dynamic) #endif for (it = 0; it < nijk; it++) { i = ijk[3*it+0]; j = ijk[3*it+1]; k = ijk[3*it+2]; t2_i_ovvv_half(oa,va,i,j,k,abc1,l2,i7_ovvv); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvl[t] = +abc1[a*(a-1)/2*va+b*va+c] -abc1[a*(a-1)/2*va+c*va+b] +abc1[b*(b-1)/2*va+c*va+a]; t2_i_ovvv_half(oa,va,k,j,i,abc1,l2,i7_ovvv); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvl[t] += -abc1[a*(a-1)/2*va+b*va+c] +abc1[a*(a-1)/2*va+c*va+b] -abc1[b*(b-1)/2*va+c*va+a]; t2_i_ovvv_half(oa,va,i,k,j,abc1,l2,i7_ovvv); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvl[t] += -abc1[a*(a-1)/2*va+b*va+c] +abc1[a*(a-1)/2*va+c*va+b] -abc1[b*(b-1)/2*va+c*va+a]; t2_i_oovo(oa,va,i,j,k,abc1,l2,i6_oovo); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvl[t] += +abc1[a*va*va+b*va+c] -abc1[b*va*va+a*va+c] -abc1[c*va*va+b*va+a]; t2_i_oovo(oa,va,j,i,k,abc1,l2,i6_oovo); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvl[t] += -abc1[a*va*va+b*va+c] +abc1[b*va*va+a*va+c] +abc1[c*va*va+b*va+a]; t2_i_oovo(oa,va,k,j,i,abc1,l2,i6_oovo); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvl[t] += -abc1[a*va*va+b*va+c] +abc1[b*va*va+a*va+c] +abc1[c*va*va+b*va+a]; t2_i_ovvv_half(oa,va,i,j,k,abc1,t2,i3_ovvv); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvr[t] = +abc1[a*(a-1)/2*va+b*va+c] -abc1[a*(a-1)/2*va+c*va+b] +abc1[b*(b-1)/2*va+c*va+a]; t2_i_ovvv_half(oa,va,k,j,i,abc1,t2,i3_ovvv); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvr[t] += -abc1[a*(a-1)/2*va+b*va+c] +abc1[a*(a-1)/2*va+c*va+b] -abc1[b*(b-1)/2*va+c*va+a]; 
t2_i_ovvv_half(oa,va,i,k,j,abc1,t2,i3_ovvv); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvr[t] += -abc1[a*(a-1)/2*va+b*va+c] +abc1[a*(a-1)/2*va+c*va+b] -abc1[b*(b-1)/2*va+c*va+a]; t2_i_oovo(oa,va,i,j,k,abc1,t2,i2_t2f2_oovo); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvr[t] += +abc1[a*va*va+b*va+c] -abc1[b*va*va+a*va+c] -abc1[c*va*va+b*va+a]; t2_i_oovo(oa,va,j,i,k,abc1,t2,i2_t2f2_oovo); for (a = 0, t = 0; a < va; a++) for (b = 0; b < a; b++) for (c = 0; c < b; c++, t++) sigvvvr[t] += -abc1[a*va*va+b*va+c] +abc1[b*va*va+a*va+c] +abc1[c*va*va+b*va+a]; t2_i_oovo(oa,va,k,j,i,abc1,t2,i2_t2f2_oovo); for (a = 0, t = 0; a < va; a++) { for (b = 0; b < a; b++) { for (c = 0; c < b; c++, t++) { float dn, l1t; sigvvvr[t] += -abc1[a*va*va+b*va+c] +abc1[b*va*va+a*va+c] +abc1[c*va*va+b*va+a]; dn = d_ov[i*va+a] + d_ov[j*va+b] + d_ov[k*va+c]; l1t = +i_jk_a_bc_ov_oovv(oa,va,l1,i_oovv,i,j,k,a,b,c) +i_jk_a_bc_ov_oovv(oa,va,f2_ov,l2,i,j,k,a,b,c); e_pt += (sigvvvl[t] - l1t) * sigvvvr[t] / dn; }}} } libpt_free(ijk); libpt_free(sigvvvl); } return (e_pt); } static double cc_ft_aab(size_t oa, size_t va, size_t ob, size_t vb, const float *d_ov_aa, const float *d_ov_bb, const float *f2_ov_aa, const float *f2_ov_bb, const float *l1_aa, const float *l1_bb, const float *t2_aaaa, const float *t2_abab, const float *t2_baba, const float *l2_aaaa, const float *l2_abab, const float *l2_baba, const float *i_oovv_aaaa, const float *i_oovv_abab, const float *i2_t2f2_oovo_aaaa, const float *i2_t2f2_oovo_abab, const float *i2_t2f2_oovo_baba, const float *i3_ovvv_aaaa, const float *i3_ovvv_abab, const float *i3_ovvv_baba, const float *i6_oovo_aaaa, const float *i6_oovo_abab, const float *i6_oovo_baba, const float *i7_ovvv_aaaa, const float *i7_ovvv_abab, const float *i7_ovvv_baba) { double e_pt = 0.0; int rank = 0, size = 1; if (oa == 0 || va == 0 || ob == 0 || vb == 0) return 0.0; #ifdef LIBPT_USE_MPI 
/* NOTE(review): this chunk begins mid-function.  The code below is the tail
 * of a perturbative-triples energy routine (its signature starts before this
 * view; judging by the call pattern it is the "aab" spin-case worker used by
 * libpt_uft_mp below -- TODO confirm against the full file).  Only comments
 * were added in this review; every code token is unchanged. */
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
{
	size_t i, j, k, a, b, c, t, it, *ijk, nijk = 0;
	float *sigvvvl, *sigvvvr, *abc1, *abc11, *abc12;

	/* Collect the (i,j,k) index triples (i < j) owned by this MPI rank,
	 * distributed round-robin by flat triple index.  The allocation
	 * 2*oa*oa*ob entries is an upper bound on the 3*oa*(oa-1)/2*ob
	 * entries actually stored. */
	if ((ijk = libpt_malloc(2*oa*oa*ob*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	for (i = 0, it = 0; i < oa; i++) {
	for (j = i+1; j < oa; j++) {
	for (k = 0; k < ob; k++, it++) {
		if ((int)it % size == rank) {
			ijk[3*nijk+0] = i;
			ijk[3*nijk+1] = j;
			ijk[3*nijk+2] = k;
			nijk++;
		}
	}}}
	/* One per-thread scratch allocation, partitioned by offsets:
	 * sigvvvl/sigvvvr hold the packed (a>b,c) sigma vectors, abc1/abc11/
	 * abc12 are contraction work buffers.  NOTE(review): abc1 and abc11
	 * deliberately alias the same offset -- they are never live at the
	 * same time; sigvvvr (vb*va*(va-1)/2 entries) ends before abc1
	 * begins at va*va*vb, so the regions do not collide. */
	if ((sigvvvl = libpt_malloc(2*va*va*vb*sizeof(*sigvvvl))) == NULL)
		err(1, "libpt malloc work");
	sigvvvr = sigvvvl + vb*va*(va-1)/2;
	abc1 = sigvvvl + va*va*vb;
	abc11 = sigvvvl + va*va*vb;
	abc12 = sigvvvl + va*va*vb + vb*va*(va-1)/2;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* Left sigma vector: contractions involving the Lambda (l2)
		 * amplitudes, antisymmetrized over the packed a>b pairs. */
		t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,l2_aaaa,i7_ovvv_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] = -abc1[a+b*va+c*va*va]
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,l2_abab,i7_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] += -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,l2_abab,i7_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] += +abc1[a+c*va+b*va*vb]
			    -abc1[b+c*va+a*va*vb];
		/* The "half" variants write packed a>b output, hence the
		 * triangular index c+vb*a*(a-1)/2+vb*b below. */
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,l2_baba,i7_ovvv_aaaa);
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,l2_baba,i7_ovvv_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] += -abc11[c+vb*a*(a-1)/2+vb*b]
			    +abc12[c+vb*a*(a-1)/2+vb*b];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,l2_aaaa,i6_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] += -abc1[b+a*va+c*va*va];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,l2_aaaa,i6_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] += +abc1[b+a*va+c*va*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,l2_abab,i6_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] += -abc1[c+a*vb+b*vb*va]
			    +abc1[c+b*vb+a*vb*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,l2_abab,i6_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] += -abc1[c+b*vb+a*vb*va]
			    +abc1[c+a*vb+b*vb*va];
		t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,l2_baba,i6_oovo_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] += -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		/* Right sigma vector: the same contraction pattern, but with
		 * the t2 amplitudes and the i2/i3 intermediates. */
		t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,t2_aaaa,i3_ovvv_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] = -abc1[a+b*va+c*va*va]
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,t2_abab,i3_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] += -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,t2_abab,i3_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] += +abc1[a+c*va+b*va*vb]
			    -abc1[b+c*va+a*va*vb];
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,t2_baba,i3_ovvv_aaaa);
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,t2_baba,i3_ovvv_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] += -abc11[c+vb*a*(a-1)/2+vb*b]
			    +abc12[c+vb*a*(a-1)/2+vb*b];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,t2_aaaa,i2_t2f2_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] += -abc1[b+a*va+c*va*va];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,t2_aaaa,i2_t2f2_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] += +abc1[b+a*va+c*va*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,t2_abab,i2_t2f2_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] += -abc1[c+a*vb+b*vb*va]
			    +abc1[c+b*vb+a*vb*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,t2_abab,i2_t2f2_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] += -abc1[c+b*vb+a*vb*va]
			    +abc1[c+a*vb+b*vb*va];
		/* Final term fused with the energy accumulation: for each
		 * packed (a>b,c), add the last right-sigma contribution,
		 * build the singles correction l1t, and accumulate
		 * (L - l1t) * R / denominator into e_pt. */
		t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,t2_baba,i2_t2f2_oovo_aaaa);
		for (a = 0, t = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < vb; c++, t++) {
			float dn, l1t;

			sigvvvr[t] += -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
			l1t = +comp_t3b_ijkabc(va,ob,va,vb,i,j,k,a,b,c,
				  l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			      -comp_t3b_ijkabc(va,ob,va,vb,i,j,k,b,a,c,
				  l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			      -comp_t3b_ijkabc(va,ob,va,vb,j,i,k,a,b,c,
				  l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			      +comp_t3b_ijkabc(va,ob,va,vb,j,i,k,b,a,c,
				  l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			      +comp_t3b_ijkabc(vb,oa,va,va,k,j,i,c,b,a,
				  l1_bb,i_oovv_aaaa,f2_ov_bb,l2_aaaa);
			/* orbital-energy denominator for this (ijk,abc) */
			dn = d_ov_aa[i*va+a] + d_ov_aa[j*va+b] +
			    d_ov_bb[k*vb+c];
			e_pt += (sigvvvl[t] - l1t) * sigvvvr[t] / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(sigvvvl);
}
	return (e_pt);
}

/* Restricted (closed-shell) (fT) energy, MPI-parallel driver: splits the
 * packed amplitude/intermediate arrays into their aaaa/abab spin blocks,
 * sums the aaa and aab spin cases, reduces over ranks, and doubles the
 * result (the remaining spin cases are equal by spin symmetry --
 * presumably; confirm against the serial reference implementation). */
double
libpt_rft_mp(size_t oa, size_t va, const float *d_ov, const float *f2_ov,
    const float *l1, const float *t2, const float *l2, const float *i_oovv,
    const float *i2_t2f2_oovo, const float *i3_ovvv, const float *i6_oovo,
    const float *i7_ovvv)
{
	double e_pt = 0.0;
	/* spin-block offsets into the packed input arrays */
	const float *t2_aaaa = t2;
	const float *t2_abab = t2 + oa*oa*va*va;
	const float *l2_aaaa = l2;
	const float *l2_abab = l2 + oa*oa*va*va;
	const float *i_oovv_aaaa = i_oovv;
	const float *i_oovv_abab = i_oovv + oa*oa*va*va;
	const float *i2_t2f2_oovo_aaaa = i2_t2f2_oovo;
	const float *i2_t2f2_oovo_abab = i2_t2f2_oovo + oa*oa*oa*va;
	const float *i3_ovvv_aaaa = i3_ovvv;
	const float *i3_ovvv_abab = i3_ovvv + oa*va*va*(va-1)/2;
	const float *i6_oovo_aaaa = i6_oovo;
	const float *i6_oovo_abab = i6_oovo + oa*oa*oa*va;
	const float *i7_ovvv_aaaa = i7_ovvv;
	const float *i7_ovvv_abab = i7_ovvv + oa*va*va*(va-1)/2;

	e_pt += cc_ft_aaa(oa, va, d_ov, f2_ov, l1, t2_aaaa, l2_aaaa,
	    i_oovv_aaaa, i2_t2f2_oovo_aaaa, i3_ovvv_aaaa, i6_oovo_aaaa,
	    i7_ovvv_aaaa);
	e_pt += cc_ft_aab(oa, va, oa, va, d_ov, d_ov, f2_ov, f2_ov, l1, l1,
	    t2_aaaa, t2_abab, t2_abab, l2_aaaa, l2_abab, l2_abab,
	    i_oovv_aaaa, i_oovv_abab, i2_t2f2_oovo_aaaa, i2_t2f2_oovo_abab,
	    i2_t2f2_oovo_abab, i3_ovvv_aaaa, i3_ovvv_abab, i3_ovvv_abab,
	    i6_oovo_aaaa, i6_oovo_abab, i6_oovo_abab, i7_ovvv_aaaa,
	    i7_ovvv_abab, i7_ovvv_abab);
#ifdef LIBPT_USE_MPI
	MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE, MPI_SUM,
	    MPI_COMM_WORLD);
#endif
	return 2.0 * e_pt;
}

/* Unrestricted (open-shell) (fT) energy, MPI-parallel driver: splits the
 * packed inputs into alpha (aa) and beta (bb) blocks plus the four t2/l2
 * spin blocks, then sums all four triple spin cases explicitly. */
double
libpt_uft_mp(size_t oa, size_t va, size_t ob, size_t vb, const float *d_ov,
    const float *f2_ov, const float *l1, const float *t2, const float *l2,
    const float *i_oovv, const float *i2_t2f2_oovo, const float *i3_ovvv,
    const float *i6_oovo, const float *i7_ovvv)
{
	double e_pt = 0.0;
	/* alpha block first, beta block immediately after it */
	const float *d_ov_aa = d_ov;
	const float *d_ov_bb = d_ov_aa + oa*va;
	const float *f2_ov_aa = f2_ov;
	const float *f2_ov_bb = f2_ov_aa + oa*va;
	const float *l1_aa = l1;
	const float *l1_bb = l1_aa + oa*va;
	const float *t2_aaaa = t2;
	const float *t2_abab = t2_aaaa + oa*oa*va*va;
	const float *t2_bbbb = t2_abab + oa*ob*va*vb;
	const float *t2_baba = t2_bbbb + ob*ob*vb*vb;
	const float *l2_aaaa = l2;
	const float *l2_abab = l2_aaaa + oa*oa*va*va;
	const float *l2_bbbb = l2_abab + oa*ob*va*vb;
	const float *l2_baba = l2_bbbb + ob*ob*vb*vb;
	const float *i_oovv_aaaa = i_oovv;
	const float *i_oovv_abab = i_oovv_aaaa + oa*oa*va*va;
	const float *i_oovv_bbbb = i_oovv_abab + oa*ob*va*vb;
	const float *i_oovv_baba = i_oovv_bbbb + ob*ob*vb*vb;
	const float *i2_t2f2_oovo_aaaa = i2_t2f2_oovo;
	const float *i2_t2f2_oovo_abab = i2_t2f2_oovo_aaaa + oa*oa*va*oa;
	const float *i2_t2f2_oovo_bbbb = i2_t2f2_oovo_abab + oa*ob*va*ob;
	const float *i2_t2f2_oovo_baba = i2_t2f2_oovo_bbbb + ob*ob*vb*ob;
	/* aaaa/bbbb ovvv blocks are stored with packed a>b pairs, hence
	 * the triangular va*(va-1)/2 strides */
	const float *i3_ovvv_aaaa = i3_ovvv;
	const float *i3_ovvv_abab = i3_ovvv_aaaa + oa*va*va*(va-1)/2;
	const float *i3_ovvv_bbbb = i3_ovvv_abab + oa*vb*va*vb;
	const float *i3_ovvv_baba = i3_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
	const float *i6_oovo_aaaa = i6_oovo;
	const float *i6_oovo_abab = i6_oovo_aaaa + oa*oa*va*oa;
	const float *i6_oovo_bbbb = i6_oovo_abab + oa*ob*va*ob;
	const float *i6_oovo_baba = i6_oovo_bbbb + ob*ob*vb*ob;
	const float *i7_ovvv_aaaa = i7_ovvv;
	const float *i7_ovvv_abab = i7_ovvv_aaaa + oa*va*va*(va-1)/2;
	const float *i7_ovvv_bbbb = i7_ovvv_abab + oa*vb*va*vb;
	const float *i7_ovvv_baba = i7_ovvv_bbbb + ob*vb*vb*(vb-1)/2;

	/* aaaaaa */
	e_pt += cc_ft_aaa(oa, va, d_ov_aa, f2_ov_aa, l1_aa, t2_aaaa, l2_aaaa,
	    i_oovv_aaaa, i2_t2f2_oovo_aaaa, i3_ovvv_aaaa, i6_oovo_aaaa,
	    i7_ovvv_aaaa);
	/* bbbbbb */
	e_pt += cc_ft_aaa(ob, vb, d_ov_bb, f2_ov_bb, l1_bb, t2_bbbb, l2_bbbb,
	    i_oovv_bbbb, i2_t2f2_oovo_bbbb, i3_ovvv_bbbb, i6_oovo_bbbb,
	    i7_ovvv_bbbb);
	/* aabaab */
	e_pt += cc_ft_aab(oa, va, ob, vb, d_ov_aa, d_ov_bb, f2_ov_aa,
	    f2_ov_bb, l1_aa, l1_bb, t2_aaaa, t2_abab, t2_baba, l2_aaaa,
	    l2_abab, l2_baba, i_oovv_aaaa, i_oovv_abab, i2_t2f2_oovo_aaaa,
	    i2_t2f2_oovo_abab, i2_t2f2_oovo_baba, i3_ovvv_aaaa,
	    i3_ovvv_abab, i3_ovvv_baba, i6_oovo_aaaa, i6_oovo_abab,
	    i6_oovo_baba, i7_ovvv_aaaa, i7_ovvv_abab, i7_ovvv_baba);
	/* bbabba */
	e_pt += cc_ft_aab(ob, vb, oa, va, d_ov_bb, d_ov_aa, f2_ov_bb,
	    f2_ov_aa, l1_bb, l1_aa, t2_bbbb, t2_baba, t2_abab, l2_bbbb,
	    l2_baba, l2_abab, i_oovv_bbbb, i_oovv_baba, i2_t2f2_oovo_bbbb,
	    i2_t2f2_oovo_baba, i2_t2f2_oovo_abab, i3_ovvv_bbbb,
	    i3_ovvv_baba, i3_ovvv_abab, i6_oovo_bbbb, i6_oovo_baba,
	    i6_oovo_abab, i7_ovvv_bbbb, i7_ovvv_baba, i7_ovvv_abab);
#ifdef LIBPT_USE_MPI
	MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE, MPI_SUM,
	    MPI_COMM_WORLD);
#endif
	return e_pt;
}
threading_utils.h
/*!
 * Copyright 2015-2019 by Contributors
 * \file common.h
 * \brief Threading utilities
 */
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_
#define XGBOOST_COMMON_THREADING_UTILS_H_

#include <dmlc/common.h>

#include <vector>
#include <algorithm>
#include <type_traits>  // std::is_signed

#include "xgboost/logging.h"

namespace xgboost {
namespace common {

// Represent simple range of indexes [begin, end)
// Inspired by tbb::blocked_range
class Range1d {
 public:
  // NOTE(review): empty ranges are rejected -- CHECK_LT requires begin < end.
  Range1d(size_t begin, size_t end): begin_(begin), end_(end) {
    CHECK_LT(begin, end);
  }

  size_t begin() const {  // NOLINT
    return begin_;
  }

  size_t end() const {  // NOLINT
    return end_;
  }

 private:
  size_t begin_;
  size_t end_;
};


// Split 2d space to balanced blocks
// Implementation of the class is inspired by tbb::blocked_range2d
// However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example:
// [ 1,2,3 ]
// [ 4,5,6 ]
// [ 7,8,9 ]
// But the class is able to work with different sizes in each 'row'. Example:
// [ 1,2 ]
// [ 3,4,5,6 ]
// [ 7,8,9]
// If grain_size is 2: It produces following blocks:
// [1,2], [3,4], [5,6], [7,8], [9]
// The class helps to process data in several tree nodes (non-balanced usually) in parallel
// Using nested parallelism (by nodes and by data in each node)
// it helps to improve CPU resources utilization
class BlockedSpace2d {
 public:
  // Example of space:
  // [ 1,2 ]
  // [ 3,4,5,6 ]
  // [ 7,8,9]
  // BlockedSpace2d will create following blocks (tasks) if grain_size=2:
  // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values)
  // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values)
  // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values)
  // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values)
  // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values)
  // Arguments:
  // dim1 - size of the first dimension in the space
  // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index
  // grain_size - max size of produced blocks
  template<typename Func>
  BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) {
    // Eagerly materialize every (row, sub-range) task up front;
    // n_blocks is a ceiling division of the row size by grain_size.
    for (size_t i = 0; i < dim1; ++i) {
      const size_t size = getter_size_dim2(i);
      const size_t n_blocks = size/grain_size + !!(size % grain_size);
      for (size_t iblock = 0; iblock < n_blocks; ++iblock) {
        const size_t begin = iblock * grain_size;
        const size_t end   = std::min(begin + grain_size, size);
        AddBlock(i, begin, end);
      }
    }
  }

  // Amount of blocks(tasks) in a space
  size_t Size() const {
    return ranges_.size();
  }

  // get index of the first dimension of i-th block(task)
  size_t GetFirstDimension(size_t i) const {
    CHECK_LT(i, first_dimension_.size());
    return first_dimension_[i];
  }

  // get a range of indexes for the second dimension of i-th block(task)
  Range1d GetRange(size_t i) const {
    CHECK_LT(i, ranges_.size());
    return ranges_[i];
  }

 private:
  void AddBlock(size_t first_dimension, size_t begin, size_t end) {
    first_dimension_.push_back(first_dimension);
    ranges_.emplace_back(begin, end);
  }

  std::vector<Range1d> ranges_;
  std::vector<size_t> first_dimension_;
};


// Wrapper to implement nested parallelism with simple omp parallel for
// Each thread statically takes one contiguous chunk of the flattened task
// list; exceptions thrown by func are captured per-thread and rethrown on
// the calling thread after the parallel region ends.
template <typename Func>
void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) {
  const size_t num_blocks_in_space = space.Size();
  nthreads = std::min(nthreads, omp_get_max_threads());
  nthreads = std::max(nthreads, 1);

  dmlc::OMPException exc;
#pragma omp parallel num_threads(nthreads)
  {
    exc.Run([&]() {
      size_t tid = omp_get_thread_num();
      // ceiling division so the last chunk may be short
      size_t chunck_size =
          num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads);

      size_t begin = chunck_size * tid;
      size_t end = std::min(begin + chunck_size, num_blocks_in_space);
      for (auto i = begin; i < end; i++) {
        func(space.GetFirstDimension(i), space.GetRange(i));
      }
    });
  }
  exc.Rethrow();
}

/**
 * OpenMP schedule
 */
struct Sched {
  enum {
    kAuto,
    kDynamic,
    kStatic,
    kGuided,
  } sched;
  size_t chunk{0};  // chunk size; 0 means "let OpenMP choose"

  Sched static Auto() { return Sched{kAuto}; }
  Sched static Dyn(size_t n = 0) { return Sched{kDynamic, n}; }
  Sched static Static(size_t n = 0) { return Sched{kStatic, n}; }
  Sched static Guided() { return Sched{kGuided}; }
};

// Parallel loop over [0, size) with an explicit OpenMP schedule.
// Exceptions from fn are captured and rethrown after the loop.
template <typename Index, typename Func>
void ParallelFor(Index size, size_t n_threads, Sched sched, Func fn) {
#if defined(_MSC_VER)
  // msvc doesn't support unsigned integer as openmp index.
  using OmpInd = std::conditional_t<std::is_signed<Index>::value, Index, omp_ulong>;
#else
  using OmpInd = Index;
#endif
  OmpInd length = static_cast<OmpInd>(size);

  dmlc::OMPException exc;
  // The schedule clause must be spelled literally in each pragma, hence the
  // switch instead of a single parameterized loop.
  switch (sched.sched) {
    case Sched::kAuto: {
#pragma omp parallel for num_threads(n_threads)
      for (OmpInd i = 0; i < length; ++i) {
        exc.Run(fn, i);
      }
      break;
    }
    case Sched::kDynamic: {
      if (sched.chunk == 0) {
#pragma omp parallel for num_threads(n_threads) schedule(dynamic)
        for (OmpInd i = 0; i < length; ++i) {
          exc.Run(fn, i);
        }
      } else {
#pragma omp parallel for num_threads(n_threads) schedule(dynamic, sched.chunk)
        for (OmpInd i = 0; i < length; ++i) {
          exc.Run(fn, i);
        }
      }
      break;
    }
    case Sched::kStatic: {
      if (sched.chunk == 0) {
#pragma omp parallel for num_threads(n_threads) schedule(static)
        for (OmpInd i = 0; i < length; ++i) {
          exc.Run(fn, i);
        }
      } else {
#pragma omp parallel for num_threads(n_threads) schedule(static, sched.chunk)
        for (OmpInd i = 0; i < length; ++i) {
          exc.Run(fn, i);
        }
      }
      break;
    }
    case Sched::kGuided: {
#pragma omp parallel for num_threads(n_threads) schedule(guided)
      for (OmpInd i = 0; i < length; ++i) {
        exc.Run(fn, i);
      }
      break;
    }
  }
  exc.Rethrow();
}

// Convenience overload: static schedule with default chunking.
template <typename Index, typename Func>
void ParallelFor(Index size, size_t n_threads, Func fn) {
  ParallelFor(size, n_threads, Sched::Static(), fn);
}

// FIXME(jiamingy): Remove this function to get rid of `omp_set_num_threads`, which sets a
// global variable in runtime and affects other programs in the same process.
template <typename Index, typename Func>
void ParallelFor(Index size, Func fn) {
  ParallelFor(size, omp_get_max_threads(), Sched::Static(), fn);
}

/* \brief Configure parallel threads.
 *
 * \param p_threads Number of threads, when it's less than or equal to 0, this function
 *        will change it to number of process on system.
 *
 * \return Global openmp max threads before configuration.
 */
inline int32_t OmpSetNumThreads(int32_t* p_threads) {
  auto& threads = *p_threads;
  int32_t nthread_original = omp_get_max_threads();
  if (threads <= 0) {
    threads = omp_get_num_procs();
  }
  // NOTE(review): mutates process-global OpenMP state (see FIXME above).
  omp_set_num_threads(threads);
  return nthread_original;
}

// Same as OmpSetNumThreads, but non-positive requests fall back to the
// current max-threads value instead of the processor count.
inline int32_t OmpSetNumThreadsWithoutHT(int32_t* p_threads) {
  auto& threads = *p_threads;
  int32_t nthread_original = omp_get_max_threads();
  if (threads <= 0) {
    threads = nthread_original;
  }
  omp_set_num_threads(threads);
  return nthread_original;
}

// Normalize a thread-count request: non-positive means "use all processors".
inline int32_t OmpGetNumThreads(int32_t n_threads) {
  if (n_threads <= 0) {
    n_threads = omp_get_num_procs();
  }
  return n_threads;
}

}  // namespace common
}  // namespace xgboost

#endif  // XGBOOST_COMMON_THREADING_UTILS_H_
GB_unaryop__identity_uint32_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): only comments were added in this review pass; any code change
// belongs in the Generator/ sources, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint32_fp32
// op(A') function:  GB_tran__identity_uint32_fp32

// C type:   uint32_t
// A type:   float
// cast:     uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: saturating float -> uint32_t conversion (NaN maps to zero)
#define GB_CASTING(z, x) \
    uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over all anz entries; entries are independent, so a static
// OpenMP schedule is used.
GrB_Info GB_unop__identity_uint32_fp32
(
    uint32_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the shared transpose template, driven by the
// GB_* macros defined above.
GrB_Info GB_tran__identity_uint32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__div_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): only comments were added in this review pass; any code change
// belongs in the Generator/ sources, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__div_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__div_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__div_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__div_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__div_uint64)
// A*D function (colscale):         GB (_AxD__div_uint64)
// D*A function (rowscale):         GB (_DxB__div_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__div_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__div_uint64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__div_uint64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__div_uint64)
// C=scalar+B                       GB (_bind1st__div_uint64)
// C=scalar+B'                      GB (_bind1st_tran__div_uint64)
// C=A+scalar                       GB (_bind2nd__div_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__div_uint64)

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 64)
// (GB_IDIV_UNSIGNED implements GraphBLAS integer division, which defines
// division by zero instead of invoking C undefined behavior)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    uint64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_UNSIGNED (x, y, 64) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_UINT64 || GxB_NO_DIV_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// NOTE(review): unlike the other kernels this one returns void and has no
// GB_DISABLE guard; the caller is responsible for the disable check.
void GB (_Cdense_ewise3_accum__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__div_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): the return below is unreachable (the block above always
    // returns); it is kept as emitted by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__div_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORK below (allocated inside the template
    // as needed)
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__div_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for DIV, so this branch is the one compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__div_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__div_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap; NULL if B is not bitmap
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__div_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap; NULL if A is not bitmap
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 64) ; \
}

GrB_Info GB (_bind1st_tran__div_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use of the macro
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 64) ; \
}

GrB_Info GB (_bind2nd_tran__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__le_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_bool) // A.*B function (eWiseMult): GB (_AemultB_08__le_bool) // A.*B function (eWiseMult): GB (_AemultB_02__le_bool) // A.*B function (eWiseMult): GB (_AemultB_04__le_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_bool) // A*D function (colscale): GB (_AxD__le_bool) // D*A function (rowscale): GB (_DxB__le_bool) // C+=B function (dense accum): GB (_Cdense_accumB__le_bool) // C+=b function (dense accum): GB (_Cdense_accumb__le_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_bool) // C=scalar+B GB (_bind1st__le_bool) // C=scalar+B' GB (_bind1st_tran__le_bool) // C=A+scalar GB (_bind2nd__le_bool) // C=A'+scalar GB (_bind2nd_tran__le_bool) // C type: bool // A type: bool // A pattern? 0 // B type: bool // B pattern? 
0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ bool aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ bool bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_BOOL || GxB_NO_LE_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__le_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; 
#include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_bool) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; bool alpha_scalar ; bool beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((bool *) alpha_scalar_in)) ; beta_scalar = (*((bool *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, 
const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; 
p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
reduction_modifier.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Parser/AST-construction test for OpenMP reduction-clause modifiers
 * (inscan, task, default).  Per the printf text, the directives only need
 * to be syntactically correct: listing the same variable `a` in two
 * reduction clauses on one construct is deliberately non-conforming and
 * exercises the parser, not the runtime.  Do not "fix" the pragma text.
 */
int main() {
  /* Initialized so the reduction combiners do not read indeterminate
   * values when this file is actually executed (reading an uninitialized
   * int in the combine step is undefined behavior). */
  int a = 0, b = 0, c = 0;
  #pragma omp parallel reduction (inscan, +:a, b) reduction (-:a, c)
  {
    printf("This is for testing parser and AST construction, which could be only syntax correct.\n");
  }
  #pragma omp parallel reduction (task, +:a, b) reduction (task, +:a, c)
  {
    printf("This is for testing parser and AST construction, which could be only syntax correct.\n");
  }
  #pragma omp parallel reduction (default, -:a, b) reduction (inscan, -:a, c)
  {
    printf("This is for testing parser and AST construction, which could be only syntax correct.\n");
  }
  return 0;
}
templatemath.h
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

/*
 * templatemath.h
 *
 * Templated math primitives (abs/min/max, transcendentals, activation
 * functions) with per-type specializations.  Most definitions forward to
 * p_* helpers from <platformmath.h>; math_def is a project macro (declared
 * elsewhere) that presumably expands to host/device qualifiers.
 *
 *  Created on: Jan 1, 2016
 *      Author: agibsonccc
 */

#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_

#include <dll.h>
#include <pointercast.h>
#include <platformmath.h>

// NOTE(review): 32737 does not match bfloat16's true maximum (~3.39e38);
// it looks copied from the half-precision limit — confirm intent.
#define BFLOAT16_MAX_VALUE 32737.
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
#define FLOAT_MIN_NORMAL 1.17549435e-38

#ifndef M_E
#define M_E 2.718281828459
#endif

namespace nd4j {
#ifdef __CUDACC__
#endif
    namespace math {

        // ---- forward declarations; T/X/Y = input type(s), Z = result type ----
        template<typename T>
        math_def inline T nd4j_abs(T value);

        template<typename T>
        math_def inline void nd4j_swap(T &val1, T &val2);

        template<typename T>
        math_def inline T nd4j_max(T val1, T val2);

        template<typename T>
        math_def inline T nd4j_min(T val1, T val2);

        template<typename T, typename Z>
        math_def inline Z nd4j_re(T val1, T val2);

        template<typename T, typename Z>
        math_def inline Z nd4j_rint(T val1);

        template<typename T, typename Z>
        math_def inline Z nd4j_copysign(T val1, T val2);

//#ifndef __CUDACC__
        template<typename X, typename Y, typename Z>
        math_def inline Z nd4j_dot(X *x, Y *y, int length);
//#endif

        template<typename T, typename Z>
        math_def inline Z nd4j_ceil(T val1);

        template<typename T>
        math_def inline bool nd4j_isnan(T val1);

        template<typename T>
        math_def inline bool nd4j_isinf(T val1);

        template<typename T>
        math_def inline bool nd4j_isfin(T val1);

        template<typename T, typename Z>
        math_def inline Z nd4j_cos(T val);

        template<typename T, typename Z>
        math_def inline Z nd4j_cosh(T val);

        template<typename X, typename Z>
        math_def inline Z nd4j_exp(X val);

        template<typename T, typename Z>
        math_def inline Z nd4j_floor(T val);

        template<typename X, typename Z>
        math_def inline Z nd4j_log(X val);

        template<typename X, typename Y, typename Z>
        math_def inline Z nd4j_pow(X val, Y val2);

        template<typename T, typename Z>
        math_def inline Z nd4j_round(T val);

        template<typename X, typename Y, typename Z>
        math_def inline Z nd4j_remainder(X num, Y denom);

        template<typename X, typename Y, typename Z>
        math_def inline Z nd4j_fmod(X num, Y denom);

        template<typename T, typename Z>
        math_def inline Z nd4j_erf(T num);

        template<typename T, typename Z>
        math_def inline Z nd4j_erfc(T num);

        // ---- activation functions and their derivatives ----

        // Logistic sigmoid: 1 / (1 + e^-val).
        template<typename T, typename Z>
        math_def inline Z nd4j_sigmoid(T val) {
            return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val));
        }

        // ELU: identity for val >= 0, e^val - 1 otherwise.
        template<typename T, typename Z>
        math_def inline Z nd4j_elu(T val) {
            if (val >= (T) 0.f) return val;
            else return nd4j_exp<T, Z>(val) - (Z) 1.0f;
            //return val >= 0.0 ? val : (nd4j_exp<T>(val) - 1.0);
        }

        // Leaky ReLU with caller-supplied negative-side slope alpha.
        template<typename T, typename Z>
        math_def inline Z nd4j_leakyrelu(T val,T alpha) {
            if (val < (T) 0.0f) return alpha * val;
            else return val;
        }

        // d/dval ELU: 1 for val >= 0, e^val otherwise.
        template<typename T, typename Z>
        math_def inline Z nd4j_eluderivative(T val) {
            if (val >= (T) 0.0f) return (Z) 1.0f;
            else return nd4j_exp<T, Z>(val);
            //return val >= 0.0 ? 1.0 : nd4j_exp(val);
        }

        template<typename T, typename Z>
        math_def inline Z nd4j_sin(T val);

        template<typename T, typename Z>
        math_def inline Z nd4j_sinh(T val);

        // Softplus: log(1 + e^val).  NOTE(review): unlike its siblings this
        // name lacks the nd4j_ prefix — confirm callers before renaming.
        template<typename T, typename Z>
        math_def inline Z softplus(T val) {
            return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
        }

        // Softsign: val / (1 + |val|).
        template<typename T, typename Z>
        math_def inline Z nd4j_softsign(T val) {
            return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val));
        }

        template<typename X, typename Z>
        math_def inline Z nd4j_sqrt(X val);

        template<typename X, typename Z>
        math_def inline Z nd4j_tanh(X val);

        template<typename T, typename Z>
        math_def inline Z nd4j_tan(T val);

        // Declaration immediately followed by its definition (redundant but
        // harmless); both arguments are converted to Z before p_atan2.
        template<typename X, typename Z>
        math_def inline Z nd4j_atan2(X val1, X val2);

        template<typename X, typename Z>
        math_def inline Z nd4j_atan2(X val1, X val2) {
            return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
        }

        template<typename T, typename Z>
        math_def inline Z nd4j_tan(T tval) {
            return p_tan<Z>(static_cast<Z>(tval));
        }

        // d/dval tanh = 1 - tanh^2.
        template<typename T, typename Z>
        math_def inline Z nd4j_tanhderivative(T val) {
            Z tanh = nd4j_tanh<T,Z>(val);
            return (Z) 1.0f - tanh * tanh;
        }

        // d/dval sigmoid = s * (1 - s).  NOTE(review): returns T while the
        // computation is done in Z — likely meant to return Z; confirm.
        template <typename T, typename Z>
        math_def inline T nd4j_sigmoidderivative(T val) {
            Z sigmoid = nd4j_sigmoid<T,Z>(val);
            return sigmoid * ((Z) 1.0f - sigmoid);
        }

        // d/dval softsign = 1 / (1 + |val|)^2.  Same T-vs-Z return-type
        // inconsistency as above — confirm before changing.
        template<typename T, typename Z>
        math_def inline T nd4j_softsignderivative(T val) {
            T y = (T) 1.0f + nd4j_abs(val);
            return (Z) 1.0f / (y * y);
        }

        // Sign function: -1 / 0 / +1 (computed in Z, declared returning T).
        template<typename T, typename Z>
        math_def inline T nd4j_sgn(T val) {
            return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f;
        }

        template<typename T, typename Z>
        math_def inline Z nd4j_sign(T val) {
            return nd4j_sgn<T, Z>(val);
        }

        template<typename T, typename Z>
        math_def inline Z nd4j_signum(T val) {
            return nd4j_sgn<T, Z>(val);
        }

//#ifndef __CUDACC__
/*
        template<>
        math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
            float16 dot = (float16) 0.0f;
            // TODO: since we can't use simd on unions, we might use something else here.
            for(int e = 0; e < length; e++) {
                dot += x[e] * y[e];
            }
            return dot;
        }
*/

        // Dot product of two length-`length` arrays, accumulated in Z.
        template<typename X, typename Y, typename Z>
        math_def inline Z nd4j_dot(X *x, Y *y, int length) {
            Z dot = (Z)0.0f;
            //#pragma omp simd reduction(+:dot)
            for(int e = 0; e < length; e++) {
                dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]);
            }
            return dot;
        }
//#endif

        template<typename T, typename Z>
        math_def inline Z nd4j_acos(T val);

        template<typename T, typename Z>
        math_def inline Z nd4j_acosh(T val);

        template<typename T, typename Z>
        math_def inline Z nd4j_asin(T val);

        template<typename T, typename Z>
        math_def inline Z nd4j_asinh(T val);

        // asinh(x) = log(sqrt(x^2 + 1) + x), built from the other wrappers.
        template<typename T, typename Z>
        math_def inline Z nd4j_asinh(T val) {
            //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
            return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
        }

        template<typename T, typename Z>
        math_def inline Z nd4j_atan(T val);

        template<typename T, typename Z>
        math_def inline Z nd4j_atanh(T val);

        // ---- nd4j_abs specializations ----

        template<>
        math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
            // Native half support: negate via __hneg only when negative.
            if (value < (float16) 0.f) {
                return float16(__hneg(value.data));
            } else
                return value;
#else
            // Fallback: round-trip through float.
            return (float16) fabsf((float) value);
#endif
        }

        template<>
        math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
            return (bfloat16) fabsf((float) value);
        }

        template<>
        math_def inline float nd4j_abs<float>(float value) {
            return fabsf(value);
        }

        template<>
        math_def inline double nd4j_abs<double>(double value) {
            return fabs(value);
        }

        template<>
        math_def inline int nd4j_abs<int>(int value) {
            return abs(value);
        }

        template<>
        math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
            return llabs(value);
        }

        // Unsigned and bool types are their own absolute value.
        template<>
        math_def inline bool nd4j_abs<bool>(bool value) {
            return value;
        }

        template<>
        math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
            return value;
        }

        template<>
        math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
            return value;
        }

        template<>
        math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
            return value;
        }
        template<>
        math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
            return value;
        }

        // Signed 8/16-bit: manual negation (abs/llabs take wider types).
        template<>
        math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
            return value < 0 ? -value : value;
        }

        template<>
        math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
            return value < 0 ? -value : value;
        }

        // ---- nd4j_isnan specializations ----

        // Compares the raw half bit pattern against 0x7fff only.
        // NOTE(review): IEEE half has many NaN encodings; this recognizes
        // just one — confirm whether that is intentional.
        template<>
        math_def inline bool nd4j_isnan<float16>(float16 value) {
            return *(value.data.getXP()) == 0x7fffU;
        }

        template<>
        math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
            return value == bfloat16::nan(); //0x7fffU;
        }

        // Standard self-inequality NaN test.
        template<>
        math_def inline bool nd4j_isnan<float>(float value) {
            return value != value;
        }

        template<>
        math_def inline bool nd4j_isnan<double>(double value) {
            return value != value;
        }

        // Integral types can never be NaN.
        template<>
        math_def inline bool nd4j_isnan<int>(int value) {
            return false;
        }

        template<>
        math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
            return false;
        }

        template<>
        math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
            return false;
        }

        template<>
        math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
            return false;
        }

        template<>
        math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
            return false;
        }

        template<>
        math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
            return false;
        }

        template<>
        math_def inline bool nd4j_isnan<bool>(bool value) {
            return false;
        }

        template<>
        math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
            return false;
        }

        template<>
        math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
            return false;
        }

        // ---- nd4j_isinf specializations ----

        // Half/bfloat16 have no std::isinf; treat out-of-range as infinite.
        template<>
        math_def inline bool nd4j_isinf<float16>(float16 value) {
            return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
        }

        template<>
        math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
            return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
        }

        template<>
        math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
            // device code: CUDA's ::isinf
            return isinf(value);
#else
            return std::isinf(value);
#endif
            //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
        }
template<> math_def inline bool nd4j_isinf<double>(double value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<int>(int value) { return false; } template<> math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) { return false; } template<> math_def inline bool nd4j_isinf<int16_t>(int16_t value) { return false; } template<> math_def inline bool nd4j_isinf<int8_t>(int8_t value) { return false; } template<> math_def inline bool nd4j_isinf<bool>(bool value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) { return false; } template<typename T> math_def inline bool nd4j_isfin(T value) { return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value); } template<> math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) { return (float16) copysignf((float) val1, (float) val2); } template<> math_def inline float nd4j_copysign<float>(float val1, float val2) { return copysignf(val1, val2); } template<> math_def inline double nd4j_copysign<double>(double val1, double val2) { return copysign(val1, val2); } template<> math_def inline int nd4j_copysign<int>(int val1, int val2) { if (val2 < 0) return -(nd4j_abs<int>(val1)); else return nd4j_abs<int>(val1); } template<> math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) { if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1)); else return nd4j_abs<Nd4jLong>(val1); } template<> math_def inline bool nd4j_max(bool val1, bool val2) { return (val1 || val2) ? true : false; } template<typename T> math_def inline T nd4j_max(T val1, T val2) { return val1 > val2 ? 
val1 : val2; } template<> math_def inline bool nd4j_min(bool val1, bool val2) { return (val1 && val2) ? true : false; } template<typename T> math_def inline T nd4j_min(T val1, T val2) { return val1 < val2 ? val1 : val2; } template <typename X, typename Z> math_def inline Z nd4j_ceil(X val) { return static_cast<Z>(p_ceil<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_round(X val) { return static_cast<Z>(p_round<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_asin(X val) { return p_asin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atan(X val) { return p_atan<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atanh(X val) { return p_atanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_cosh(X val) { return p_cosh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_rint(X val) { return p_rint<X>(val); } template <typename X, typename Z> math_def inline Z nd4j_sinh(X val) { return p_sinh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_acos(X val) { return p_acos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_acosh(X val) { return p_acosh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_cos(X val) { return p_cos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_exp(X val) { return p_exp<X>(val); } template<typename X, typename Z> math_def inline Z nd4j_floor(X val) { return static_cast<Z>(p_floor<X>(val)); } template<typename X, typename Z> math_def inline Z nd4j_log(X val) { return static_cast<Z>(p_log<X>(val)); } /** * This func is special case - it must return floating point value, and optionally Y arg can be floating point argument * @tparam X * @tparam Y * @tparam Z * @param val * @param val2 * @return */ template <typename X, typename Y, typename 
Z> math_def inline Z nd4j_pow(X val, Y val2) { return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template<typename T> math_def inline T nd4j_re(T val1, T val2) { if (val1 == (T) 0.0f && val2 == (T) 0.0f) return (T) 0.0f; return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_remainder(X val, Y val2) { return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_fmod(X val, Y val2) { return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Z> math_def inline Z nd4j_sin(X val) { return p_sin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_sqrt(X val) { return p_sqrt<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_tanh(X val) { return p_tanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erf(X val) { return p_erf<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erfc(X val) { return p_erfc<Z>(static_cast<Z>(val)); } template<typename T> math_def inline void nd4j_swap(T &val1, T &val2) { T temp = val1; val1=val2; val2=temp; }; #ifdef __CUDACC__ namespace atomics { template <typename T> inline __device__ T nd4j_atomicAdd(T* address, T val); template <typename T> inline __device__ T nd4j_atomicSub(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMul(T* address, T val); template <typename T> inline __device__ T nd4j_atomicDiv(T* address, T val); template <> inline __device__ double nd4j_atomicAdd<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while 
(assumed != old); return __longlong_as_double(old); } template <> inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) { int* address_as_ull = (int*) address; long addr = (long) address; bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (addr - 2); PAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { float16 res = ((float16) old.B.H) + val; fresh.B.H = res.data; fresh.B.L = old.B.L; } else { float16 res = ((float16) old.B.L) + val; fresh.B.L = res.data; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) { int* address_as_ull = (int*) address; long addr = (long)(address); bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (addr - 2); BPAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { bfloat16 res = old.B.H + val; fresh.B.H = res; fresh.B.L = old.B.L; } else { bfloat16 res = old.B.L + val; fresh.B.L = res; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ double nd4j_atomicSub<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val - __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicMul<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = 
atomicCAS(address_as_ull, assumed,__double_as_longlong(val * __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicDiv<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val / __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float nd4j_atomicAdd<float>(float* address, float val) { return atomicAdd(address,val); } template <> inline __device__ float nd4j_atomicSub<float>(float* address, float val) { int* address_as_ull = (int*) address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val - __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ float nd4j_atomicMul<float>(float* address, float val) { int* address_as_ull = ( int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ float nd4j_atomicDiv<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } } #endif } } #endif /* TEMPLATEMATH_H_ */
meta.h
#include <iostream> #include <vector> #include <string> #include <omp.h> #include "formula.h" using namespace std; template<int N> class formula_generator{ public: static inline void EXEC(ResultsWriter & writer, int elcount, int minimum[], int maximum[], double masses[], int *current, double pre_mass, double loMass, double hiMass, int k) { int c = min(int((hiMass-pre_mass)/masses[N-1]),maximum[N-1]); if (N == elcount) { #pragma omp parallel for for (k = minimum[N-1]; k<=c; k++) { current[N-1] = k; double current_mass_i= pre_mass+masses[N-1]*k; int *current_i = new int [elcount]; for (int i=0;i<elcount;i++) { current_i[i]=current[i]; } formula_generator<N-1>::EXEC(writer, elcount, minimum, maximum, masses, current_i, current_mass_i, loMass, hiMass, k); delete []current_i; } } else { for (int i = minimum[N-1]; i<=c; i++) { current[N-1] = i; double current_mass_i= pre_mass+masses[N-1]*i; formula_generator<N-1>::EXEC(writer, elcount, minimum, maximum, masses, current, current_mass_i, loMass, hiMass, k); } } } }; template<> class formula_generator<0>{ public: static inline void EXEC(ResultsWriter & writer, int elcount, int minimum[], int maximum[], double masses[], int *current, double pre_mass, double loMass, double hiMass, int k) { if (pre_mass >= loMass && pre_mass <= hiMass ) { #pragma omp critical (p_result) { for ( int i = 0; i < elcount; ++i) { if (i != elcount-1) { writer.p_result->data[writer.p_result->len*elcount+i] = current[i]; } else { writer.p_result->data[writer.p_result->len*elcount+i] = k; } } writer.p_result->mass[writer.p_result->len] = pre_mass; writer.p_result->len++; if (writer.p_result->len==writer.m_poolSize) { writer.writeResults(); writer.p_result->len = 0; } } } } };
GB_unop__exp2_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__exp2_fc64_fc64
// op(A') function: GB_unop_tran__exp2_fc64_fc64

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_cexp2 (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cexp2 (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cexp2 (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP2 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cexp2 element-wise over anz entries, parallelized with OpenMP.
// Handles both the full (Ab == NULL) and bitmap (Ab != NULL) cases.
GrB_Info GB_unop_apply__exp2_fc64_fc64
(
    GxB_FC64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is full (or sparse with all entries present in Ax)
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cexp2 (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cexp2 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is generated by GB_unop_transpose.c using the
// GB_CAST_OP macro defined above.
GrB_Info GB_unop_tran__exp2_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__minv_int64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_int64_int8
// op(A') function: GB_tran__minv_int64_int8

// C type: int64_t
// A type: int8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise signed integer "multiplicative inverse" (minv) of an int8
// array cast up to int64, parallelized with OpenMP.
GrB_Info GB_unop__minv_int64_int8
(
    int64_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is generated by GB_unaryop_transpose.c using the
// GB_CAST_OP macro defined above (phase 2 of 2).
GrB_Info GB_tran__minv_int64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fields_values.c
// RUN: %libomp-compile-and-run
// REQUIRES: !abt

// Test for the OpenMP affinity-format fields: each check_* routine sets an
// affinity format string, captures it inside parallel regions with
// omp_capture_affinity(), and compares the expansion against the value
// returned by the corresponding OpenMP runtime query.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

#define XSTR(x) #x
#define STR(x) XSTR(x)

#define streqls(s1, s2) (!strcmp(s1, s2))

// Abort with file/line context if the condition does not hold.
#define check(condition) \
  if (!(condition)) { \
    fprintf(stderr, "error: %s: %d: " STR(condition) "\n", __FILE__, \
            __LINE__); \
    exit(1); \
  }

#if defined(_WIN32)
#include <windows.h>
#define getpid _getpid
typedef int pid_t;
#define gettid GetCurrentThreadId
// GetComputerNameA takes the buffer size by pointer, POSIX gethostname by value.
#define my_gethostname(buf, sz) GetComputerNameA(buf, &(sz))
#else
#include <unistd.h>
#include <sys/types.h>
#define my_gethostname(buf, sz) gethostname(buf, sz)
#endif

#define BUFFER_SIZE 256

// Capture the current affinity format expansion and parse it as an int.
int get_integer() {
  int n, retval;
  char buf[BUFFER_SIZE];
  size_t needed = omp_capture_affinity(buf, BUFFER_SIZE, NULL);
  check(needed < BUFFER_SIZE);
  n = sscanf(buf, "%d", &retval);
  check(n == 1);
  return retval;
}

// Capture the current affinity format expansion as a heap-allocated string
// (caller frees).
char* get_string() {
  int n, retval; // NOTE(review): unused here; likely copied from get_integer
  char buf[BUFFER_SIZE];
  size_t needed = omp_capture_affinity(buf, BUFFER_SIZE, NULL);
  check(needed < BUFFER_SIZE);
  return strdup(buf);
}

// Set each of the two equivalent formats (long and short form) and verify the
// captured integer matches func() at top level, inside a nested region, and
// after the nested region ends.
void check_integer(const char* formats[2], int(*func)()) {
  int i;
  for (i = 0; i < 2; ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      check(get_integer() == func());
      #pragma omp parallel num_threads(3)
      {
        check(get_integer() == func());
      }
      check(get_integer() == func());
    }
  }
}

void check_nesting_level() {
  // Check %{nesting_level} and %L
  const char* formats[2] = {"%{nesting_level}", "%L"};
  check_integer(formats, omp_get_level);
}

void check_thread_num() {
  // Check %{thread_num} and %n
  const char* formats[2] = {"%{thread_num}", "%n"};
  check_integer(formats, omp_get_thread_num);
}

void check_num_threads() {
  // Check %{num_threads} and %N
  const char* formats[2] = {"%{num_threads}", "%N"};
  check_integer(formats, omp_get_num_threads);
}

// Thread number of this thread's parent in the enclosing (outer) level.
int ancestor_helper() { return omp_get_ancestor_thread_num(omp_get_level() - 1); }
void check_ancestor_tnum() {
  // Check %{ancestor_tnum} and %a
  const char* formats[2] = {"%{ancestor_tnum}", "%a"};
  check_integer(formats, ancestor_helper);
}

int my_get_pid() { return (int)getpid(); }
void check_process_id() {
  // Check %{process_id} and %P
  const char* formats[2] = {"%{process_id}", "%P"};
  check_integer(formats, my_get_pid);
}

/*
int my_get_tid() { return (int)gettid(); }
void check_native_thread_id() {
  // Check %{native_thread_id} and %i
  const char* formats[2] = {"%{native_thread_id}", "%i"};
  check_integer(formats, my_get_tid);
}
*/

// Verify %{host}/%H expand to the machine's hostname.
void check_host() {
  int i;
  int buffer_size = 256;
  const char* formats[2] = {"%{host}", "%H"};
  char hostname[256];
  my_gethostname(hostname, buffer_size);
  for (i = 0; i < 2; ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      char* host = get_string();
      check(streqls(host, hostname));
      free(host);
    }
  }
}

// Unknown fields must expand to the literal string "undefined".
void check_undefined() {
  int i;
  const char* formats[2] = {"%{foobar}", "%X"};
  for (i = 0; i < 2; ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      char* undef = get_string();
      check(streqls(undef, "undefined"));
      free(undef);
    }
  }
}

int main(int argc, char** argv) {
  omp_set_nested(1); // nested parallelism needed for the inner regions above
  check_nesting_level();
  check_num_threads();
  check_ancestor_tnum();
  check_process_id();
  //check_native_thread_id();
  check_host();
  check_undefined();
  return 0;
}
6edef_gcc_so8.c
#define _POSIX_C_SOURCE 200809L

/* Wall-clock timer helpers: START_TIMER(S) declares start/end timevals for
   section S; STOP_TIMER(S, T) accumulates elapsed seconds into T->S. */
#define START_TIMER(S) \
  struct timeval start_##S, end_##S; \
  gettimeofday(&start_##S, NULL);
#define STOP_TIMER(S, T) \
  gettimeofday(&end_##S, NULL); \
  T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000;

#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Generated data carrier (Devito-style): `data` is the raw buffer, `size`
   holds the per-dimension extents used to re-cast it as a VLA pointer. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

struct profiler
{
  double section0;
  double section1;
};

/* Inner blocked stencil kernel; see definition below. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw);

/* Driver: walks time-skewed (time, xb, yb) tile coordinates and invokes bf0
   on each tile. Buffers rotate over three time slots (t0/t1/t2 mod 3). */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
  /* Re-cast the flat buffers as variably-dimensioned arrays. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict sp_source_id)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]])sp_source_id_vec->data;
  /* NOTE(review): `u` appears unused in this driver (bf0 re-casts u_vec itself). */
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* Tile/block extents supplied at runtime via block_sizes. */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];

  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  //for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3))
  //{
  int sf = 4; /* skewing factor */
  int t_blk_size = 2 * sf * (time_M - time_m);

  START_TIMER(section0)
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    /* Tile origins are shifted by sf*(time_M - time_m) to cover skewed space. */
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        /* Triple-buffer slot indices t0/t1/t2 derived from the logical
           timestep (time / sf) modulo 3. */
        for (int time = t_blk, t1 = (time + 2) % (3), t0 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1)); /* wrapped timestep for source table */
          bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_id_vec, save_src_u_vec, source_id_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
        }
      }
    }
    /* End section0 */
  }
  STOP_TIMER(section0, timers)

  /* section1 is currently empty; only its timer bookkeeping remains. */
  for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3))
  {
    START_TIMER(section1)
    /* Begin section1 */
    /* End section1 */
    STOP_TIMER(section1, timers)
  }
  return 0;
}

/* One (x,y) tile of one timestep: applies the 8th-order-in-space acoustic
   update to u[t2] from u[t0]/u[t1], then injects the saved source terms at
   the sparse source locations recorded in sp_source_id / source_id.
   Spatial indices are de-skewed with `- time`; the +8 / +1 offsets are the
   halo paddings of u and damp respectively. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw)
{
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict sp_source_id)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]])sp_source_id_vec->data;

  /* Degenerate blocks: nothing to do. */
  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
        {
          for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
          {
            #pragma omp simd aligned(damp, u, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r8 = 1.0/dt;
              float r7 = 1.0/(dt*dt);
              float r6 = 1.0/(vp[x - time + 8][y - time + 8][z + 8]*vp[x - time + 8][y - time + 8][z + 8]);
              /* 8th-order 3D Laplacian stencil + damping, implicit update. */
              u[t2][x - time + 8][y - time + 8][z + 8] = (r6*(-r7*(-2.0F*u[t0][x - time + 8][y - time + 8][z + 8] + u[t1][x - time + 8][y - time + 8][z + 8])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t0][x - time + 8][y - time + 8][z + 8]) - 7.93650813e-6F*(u[t0][x - time + 4][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 4][z + 8] + u[t0][x - time + 8][y - time + 8][z + 4] + u[t0][x - time + 8][y - time + 8][z + 12] + u[t0][x - time + 8][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 8][z + 8]) + 1.12874782e-4F*(u[t0][x - time + 5][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 5][z + 8] + u[t0][x - time + 8][y - time + 8][z + 5] + u[t0][x - time + 8][y - time + 8][z + 11] + u[t0][x - time + 8][y - time + 11][z + 8] + u[t0][x - time + 11][y - time + 8][z + 8]) - 8.8888891e-4F*(u[t0][x - time + 6][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 6][z + 8] + u[t0][x - time + 8][y - time + 8][z + 6] + u[t0][x - time + 8][y - time + 8][z + 10] + u[t0][x - time + 8][y - time + 10][z + 8] + u[t0][x - time + 10][y - time + 8][z + 8]) + 7.11111128e-3F*(u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9] + u[t0][x - time + 8][y - time + 9][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8]) - 3.79629639e-2F*u[t0][x - time + 8][y - time + 8][z + 8])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]);
            }
            /* Inject saved source contributions at the sparse points of this
               (x, y) column (count given by nnz_sp_source_mask). */
            #pragma omp simd aligned(damp, u, vp : 32)
            for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
            {
              int zind = sp_source_id[x - time][y - time][sp_zi];
              float r0 = save_src_u[tw][source_id[x - time][y - time][zind]];
              u[t2][x - time + 8][y - time + 8][zind + 8] += r0;
            }
          }
        }
      }
    }
  }
}
bli_trsm_ref.c
/* BLIS An object-based framework for developing high-performance BLAS-like libraries. Copyright (C) 2014, The University of Texas at Austin Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name(s) of the copyright holder(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "blis.h" #if 0 // An implementation that attempts to facilitate emission of vectorized // instructions via constant loop bounds + #pragma omp simd directives. 
#undef GENTFUNC
/* Lower-triangular TRSM micro-kernel (constant mr/nr bounds, omp simd).
   a holds the packed mr x mr triangle (diagonal pre-inverted), b the packed
   mr x nr right-hand side, and the solution is written to both b and c. */
#define GENTFUNC( ctype, ch, opname, arch, suf, mr, nr ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
     ( \
       ctype* restrict a, \
       ctype* restrict b, \
       ctype* restrict c, inc_t rs_c, inc_t cs_c, \
       auxinfo_t* restrict data, \
       cntx_t* restrict cntx \
     ) \
{ \
	const inc_t rs_a = 1; \
	const inc_t cs_a = mr; \
\
	const inc_t rs_b = nr; \
	const inc_t cs_b = 1; \
\
	_Pragma( "omp simd" ) \
	for ( dim_t i = 0; i < mr; ++i ) \
	{ \
		/* b1 = b1 - a10t * B0; */ \
		/* b1 = b1 / alpha11; */ \
		for ( dim_t j = 0; j < nr; ++j ) \
		{ \
			ctype beta11c = b[i*rs_b + j*cs_b]; \
			ctype rho11; \
\
			/* beta11 = beta11 - a10t * b01; */ \
			PASTEMAC(ch,set0s)( rho11 ); \
			for ( dim_t l = 0; l < i; ++l ) \
			{ \
				PASTEMAC(ch,axpys)( a[i*rs_a + l*cs_a], \
				                    b[l*rs_b + j*cs_b], rho11 ); \
			} \
			PASTEMAC(ch,subs)( rho11, beta11c ); \
\
			/* beta11 = beta11 / alpha11; */ \
			/* NOTE: The INVERSE of alpha11 (1.0/alpha11) is stored instead of alpha11, so we can multiply rather than divide. We store the inverse of alpha11 intentionally to avoid expensive division instructions within the micro-kernel. */ \
			PASTEMAC(ch,scals)( a[i*rs_a + i*cs_a], beta11c ); \
\
			/* Output final result to matrix c. */ \
			PASTEMAC(ch,copys)( beta11c, c[i*rs_c + j*cs_c] ); \
\
			/* Store the local value back to b11. */ \
			PASTEMAC(ch,copys)( beta11c, b[i*rs_b + j*cs_b] ); \
		} \
	} \
}

//INSERT_GENTFUNC_BASIC2( trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
GENTFUNC( float, s, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 16 )
GENTFUNC( double, d, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( scomplex, c, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( dcomplex, z, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 4 )

#undef GENTFUNC
/* Upper-triangular TRSM micro-kernel (constant bounds, omp simd): same as
   above, but rows are processed bottom-up and updates read from a12t/B2. */
#define GENTFUNC( ctype, ch, opname, arch, suf, mr, nr ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
     ( \
       ctype* restrict a, \
       ctype* restrict b, \
       ctype* restrict c, inc_t rs_c, inc_t cs_c, \
       auxinfo_t* restrict data, \
       cntx_t* restrict cntx \
     ) \
{ \
	const inc_t rs_a = 1; \
	const inc_t cs_a = mr; \
\
	const inc_t rs_b = nr; \
	const inc_t cs_b = 1; \
\
	_Pragma( "omp simd" ) \
	for ( dim_t iter = 0; iter < mr; ++iter ) \
	{ \
		dim_t i = mr - iter - 1; \
\
		/* b1 = b1 - a12t * B2; */ \
		/* b1 = b1 / alpha11; */ \
		for ( dim_t j = 0; j < nr; ++j ) \
		{ \
			ctype beta11c = b[i*rs_b + j*cs_b]; \
			ctype rho11; \
\
			/* beta11 = beta11 - a12t * b21; */ \
			PASTEMAC(ch,set0s)( rho11 ); \
			for ( dim_t l = 0; l < iter; ++l ) \
			{ \
				PASTEMAC(ch,axpys)( a[i*rs_a + (i+1+l)*cs_a], \
				                    b[(i+1+l)*rs_b + j*cs_b], rho11 ); \
			} \
			PASTEMAC(ch,subs)( rho11, beta11c ); \
\
			/* beta11 = beta11 / alpha11; */ \
			/* NOTE: The INVERSE of alpha11 (1.0/alpha11) is stored instead of alpha11, so we can multiply rather than divide. We store the inverse of alpha11 intentionally to avoid expensive division instructions within the micro-kernel. */ \
			PASTEMAC(ch,scals)( a[i*rs_a + i*cs_a], beta11c ); \
\
			/* Output final result to matrix c. */ \
			PASTEMAC(ch,copys)( beta11c, c[i*rs_c + j*cs_c] ); \
\
			/* Store the local value back to b11. */ \
			PASTEMAC(ch,copys)( beta11c, b[i*rs_b + j*cs_b] ); \
		} \
	} \
}

//INSERT_GENTFUNC_BASIC2( trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
GENTFUNC( float, s, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 16 )
GENTFUNC( double, d, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( scomplex, c, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( dcomplex, z, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 4 )

#else

// An implementation that uses variable loop bounds (queried from the context)
// and makes no use of #pragma omp simd.

#undef GENTFUNC
/* Lower-triangular TRSM reference kernel; mr/nr and packing leading
   dimensions are read from the context at run time. */
#define GENTFUNC( ctype, ch, opname, arch, suf ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
     ( \
       ctype* restrict a, \
       ctype* restrict b, \
       ctype* restrict c, inc_t rs_c, inc_t cs_c, \
       auxinfo_t* restrict data, \
       cntx_t* restrict cntx \
     ) \
{ \
	const num_t dt = PASTEMAC(ch,type); \
\
	const dim_t mr = bli_cntx_get_blksz_def_dt( dt, BLIS_MR, cntx ); \
	const dim_t nr = bli_cntx_get_blksz_def_dt( dt, BLIS_NR, cntx ); \
\
	const inc_t packmr = bli_cntx_get_blksz_max_dt( dt, BLIS_MR, cntx ); \
	const inc_t packnr = bli_cntx_get_blksz_max_dt( dt, BLIS_NR, cntx ); \
\
	const dim_t m = mr; \
	const dim_t n = nr; \
\
	const inc_t rs_a = 1; \
	const inc_t cs_a = packmr; \
\
	const inc_t rs_b = packnr; \
	const inc_t cs_b = 1; \
\
	dim_t iter, i, j, l; \
	dim_t n_behind; \
\
	for ( iter = 0; iter < m; ++iter ) \
	{ \
		i = iter; \
		n_behind = i; \
\
		ctype* restrict alpha11 = a + (i )*rs_a + (i )*cs_a; \
		ctype* restrict a10t = a + (i )*rs_a + (0 )*cs_a; \
		ctype* restrict B0 = b + (0 )*rs_b + (0 )*cs_b; \
		ctype* restrict b1 = b + (i )*rs_b + (0 )*cs_b; \
\
		/* b1 = b1 - a10t * B0; */ \
		/* b1 = b1 / alpha11; */ \
		for ( j = 0; j < n; ++j ) \
		{ \
			ctype* restrict b01 = B0 + (0 )*rs_b + (j )*cs_b; \
			ctype* restrict beta11 = b1 + (0 )*rs_b + (j )*cs_b; \
			ctype* restrict gamma11 = c + (i )*rs_c + (j )*cs_c; \
			ctype beta11c = *beta11; \
			ctype rho11; \
\
			/* beta11 = beta11 - a10t * b01; */ \
			PASTEMAC(ch,set0s)( rho11 ); \
			for ( l = 0; l < n_behind; ++l ) \
			{ \
				ctype* restrict alpha10 = a10t + (l )*cs_a; \
				ctype* restrict beta01 = b01 + (l )*rs_b; \
\
				PASTEMAC(ch,axpys)( *alpha10, *beta01, rho11 ); \
			} \
			PASTEMAC(ch,subs)( rho11, beta11c ); \
\
			/* beta11 = beta11 / alpha11; */ \
			/* NOTE: The INVERSE of alpha11 (1.0/alpha11) is stored instead of alpha11, so we can multiply rather than divide. We store the inverse of alpha11 intentionally to avoid expensive division instructions within the micro-kernel. */ \
			PASTEMAC(ch,scals)( *alpha11, beta11c ); \
\
			/* Output final result to matrix c. */ \
			PASTEMAC(ch,copys)( beta11c, *gamma11 ); \
\
			/* Store the local value back to b11. */ \
			PASTEMAC(ch,copys)( beta11c, *beta11 ); \
		} \
	} \
}

INSERT_GENTFUNC_BASIC2( trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )

#undef GENTFUNC
/* Upper-triangular TRSM reference kernel (bottom-up row sweep). */
#define GENTFUNC( ctype, ch, opname, arch, suf ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
     ( \
       ctype* restrict a, \
       ctype* restrict b, \
       ctype* restrict c, inc_t rs_c, inc_t cs_c, \
       auxinfo_t* restrict data, \
       cntx_t* restrict cntx \
     ) \
{ \
	const num_t dt = PASTEMAC(ch,type); \
\
	const dim_t mr = bli_cntx_get_blksz_def_dt( dt, BLIS_MR, cntx ); \
	const dim_t nr = bli_cntx_get_blksz_def_dt( dt, BLIS_NR, cntx ); \
\
	const inc_t packmr = bli_cntx_get_blksz_max_dt( dt, BLIS_MR, cntx ); \
	const inc_t packnr = bli_cntx_get_blksz_max_dt( dt, BLIS_NR, cntx ); \
\
	const dim_t m = mr; \
	const dim_t n = nr; \
\
	const inc_t rs_a = 1; \
	const inc_t cs_a = packmr; \
\
	const inc_t rs_b = packnr; \
	const inc_t cs_b = 1; \
\
	dim_t iter, i, j, l; \
	dim_t n_behind; \
\
	for ( iter = 0; iter < m; ++iter ) \
	{ \
		i = m - iter - 1; \
		n_behind = iter; \
\
		ctype* restrict alpha11 = a + (i )*rs_a + (i )*cs_a; \
		ctype* restrict a12t = a + (i )*rs_a + (i+1)*cs_a; \
		ctype* restrict b1 = b + (i )*rs_b + (0 )*cs_b; \
		ctype* restrict B2 = b + (i+1)*rs_b + (0 )*cs_b; \
\
		/* b1 = b1 - a12t * B2; */ \
		/* b1 = b1 / alpha11; */ \
		for ( j = 0; j < n; ++j ) \
		{ \
			ctype* restrict beta11 = b1 + (0 )*rs_b + (j )*cs_b; \
			ctype* restrict b21 = B2 + (0 )*rs_b + (j )*cs_b; \
			ctype* restrict gamma11 = c + (i )*rs_c + (j )*cs_c; \
			ctype beta11c = *beta11; \
			ctype rho11; \
\
			/* beta11 = beta11 - a12t * b21; */ \
			PASTEMAC(ch,set0s)( rho11 ); \
			for ( l = 0; l < n_behind; ++l ) \
			{ \
				ctype* restrict alpha12 = a12t + (l )*cs_a; \
				ctype* restrict beta21 = b21 + (l )*rs_b; \
\
				PASTEMAC(ch,axpys)( *alpha12, *beta21, rho11 ); \
			} \
			PASTEMAC(ch,subs)( rho11, beta11c ); \
\
			/* beta11 = beta11 / alpha11; */ \
			/* NOTE: The INVERSE of alpha11 (1.0/alpha11) is stored instead of alpha11, so we can multiply rather than divide. We store the inverse of alpha11 intentionally to avoid expensive division instructions within the micro-kernel. */ \
			PASTEMAC(ch,scals)( *alpha11, beta11c ); \
\
			/* Output final result to matrix c. */ \
			PASTEMAC(ch,copys)( beta11c, *gamma11 ); \
\
			/* Store the local value back to b11. */ \
			PASTEMAC(ch,copys)( beta11c, *beta11 ); \
		} \
	} \
}

INSERT_GENTFUNC_BASIC2( trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )

#endif
Jacobi2D-DiamondByHandParam-OMP_dyn.test.c
/******************************************************************************
 * Jacobi2D benchmark
 * Diamond tiling parameterized by hand.
 *
 * Copied Jacobi2D-DiamondSlabISCCParam-OMP-test.c to get similar format
 * to other drivers and then put in parameterized loop bounds.
 *
 * Look for the notes titled "Parameterizing diamond tiling by hand" in
 * ProjectNotes/chapel-diamond-MMS-log.txt for the details of how this was done.
 *
 * Usage:
 * make omp
 * export OMP_NUM_THREADS=8
 * bin/Jacobi2D-DiamondISCCParam-OMP \
 * `cat src/Jacobi2D-DiamondByHandParam-OMP.perfexecopts`
 * For a run on 8 threads
 *
 * Use
 * bin/Jacobi2D-DiamondByHandParam-OMP -h
 * to get a list of command-line arguments. It is possible that not all work
 * for this driver.
 *
 ******************************************************************************/
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdbool.h>
#include <ctype.h>
#include <math.h>
#include <assert.h>

/* 5-point Jacobi stencil: average of the point and its 4 neighbors, reading
   from buffer `read` and writing into buffer `write`. */
#define STENCIL(read,write,x,y) space[write][x][y] = \
          ( space[read][x-1][y] +\
            space[read][x][y] +\
            space[read][x+1][y] +\
            space[read][x][y+1] +\
            space[read][x][y-1] )/5;

#include "util.h"

// main
// Stages
// 1 - command line parsing
// 2 - data allocation and initialization
// 3 - jacobi 1D timed within an openmp loop
// 4 - output and optional verification
int main( int argc, char* argv[] ){
  // rather than calling fflush
  setbuf(stdout, NULL);

  // 1 - command line parsing
  Params cmdLineArgs;
  parseCmdLineArgs(&cmdLineArgs,argc,argv);

  // T is used down it loop code a lot.
  int T = cmdLineArgs.T;

  // If tau was not defined at compile time then declare a tau variable
  // and grab the value set with command-line options.
  // If it was defined at compile time then all instances of "tau" will be replaced
  // with that number by the compiler.
#if ! defined tau
  int tau = cmdLineArgs.tau_runtime;
#endif

  // 2 - data allocation and initialization
  int lowerBound = 1;
  int upperBound = lowerBound + cmdLineArgs.problemSize - 1;
  double** space[2];   // two (N+2)^2 buffers incl. halo, ping-ponged over time
  int i;

  // allocate x axis
  space[0] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*));
  space[1] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*));
  if( space[0] == NULL || space[1] == NULL ){
    printf( "Could not allocate x axis of space array\n" );
    exit(0);
  }
  // allocate y axis
  for( i = 0; i < cmdLineArgs.problemSize + 2; ++i ){
    space[0][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double));
    space[1][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double));
    if( space[0][i] == NULL || space[1][i] == NULL ){
      printf( "Could not allocate y axis of space array\n" );
      exit(0);
    }
  }
  // use global seed to seed the random number gen (will be constant)
  srand(cmdLineArgs.globalSeed);

  // first touch for openmp
  // FIXME: we will have to specialize first touch for diamond tiles
  int x, y;
#pragma omp parallel for private( x, y ) collapse(2) schedule(dynamic)
  for( x = lowerBound; x <= upperBound; ++x ){
    for( y = lowerBound; y <= upperBound; ++y ){
      space[0][x][y] = 0;
    }
  }

  // seed the space.
  for( x = lowerBound; x <= upperBound; ++x ){
    for( y = lowerBound; y <= upperBound; ++y ){
      space[0][x][y] = rand() / (double)rand();
    }
  }

  // set halo values (sanity)
  for( i = 0; i < cmdLineArgs.problemSize + 2; ++i){
    space[0][i][0] = 0;
    space[1][i][0] = 0;
    space[0][i][cmdLineArgs.problemSize + 1] = 0;
    space[1][i][cmdLineArgs.problemSize + 1] = 0;
    space[0][0][i] = 0;
    space[1][0][i] = 0;
    space[0][cmdLineArgs.problemSize + 1][i] = 0;
    space[1][cmdLineArgs.problemSize + 1][i] = 0;
  }

  // 3 - jacobi 2D timed within an openmp loop
  double start_time = omp_get_wtime();

  int read=0,write=1;
  int num_tiles = 0;
  int tau_times_3 = tau*3;

  // Set lower and upper bounds for spatial dimensions.
  int Li=1, Lj=1, Ui=upperBound, Uj=upperBound;
  int thyme, k1, k2, t, j;

  // Loop over tile wavefronts.
  //for (thyme=-2; thyme<=floord(3*T,tau); thyme++) { // rev 298
  for (thyme=ceild(3,tau)-3; thyme<=floord(3*T,tau); thyme++){ // from paper

    // The next two loops iterate within a tile wavefront.
    //int k1_lb = floord(3*Lj+2+(thyme-2)*tau,tau_times_3); // rev 298
    //int k1_ub = floord(3*Uj+(thyme+2)*tau-2,tau_times_3);
    int k1_lb = ceild(3*Lj+2+(thyme-2)*tau,tau*3); // paper
    int k1_ub = floord(3*Uj+(thyme+2)*tau,tau*3);

    // int k2_lb = floord((2*thyme-2)*tau-3*Ui+2,tau_times_3);
    // int k2_ub = floord((2+2*thyme)*tau-2-3*Li,tau_times_3);
#define k2_lb_exp(k1val) floord((2*thyme-2)*tau-3*Ui+2,tau*3)-(k1val)
#define k2_ub_exp(k1val) floord((2+2*thyme)*tau-3*Li-2,tau*3)-(k1val)

    // bounding box for k2
    int k2_lb = k2_lb_exp(k1_ub); // min possible value of expression
    int k2_ub = k2_ub_exp(k1_lb); // max possible value of expression

    //num_tiles+=(k2_ub-k2_lb+1)*(k1_ub-k1_lb+1);
    //printf("Number of tiles: %d\n",(k2_ub-k2_lb+1)*(k1_ub-k1_lb+1));

    // Tiles within one wavefront are independent, hence the parallel loop.
#pragma omp parallel for shared(start_time, Li, Lj, Ui, Uj ) private(read, write,k1,k2,t,i,j) schedule(dynamic) collapse(2)
    for (k1=k1_lb; k1<=k1_ub; k1++) {
      for (int x=k2_lb; x<=k2_ub; x++) {
        k2 = x;
        // Loop over time within a tile.
        //for (t=max(1,floord(thyme*tau,3)); // rev 298
        //     t<= min(T,floord((3+thyme)*tau-3,3)); t++) {
        for (t = max(1,floord(thyme*tau-1, 3) + 1); // from paper
             t < min(T+1, tau + floord(thyme*tau, 3)); t+=1) {

          // if t % 2 is 1, then read=0 and write=1
          write = t & 1;
          read = 1-write;

          // Loops over spatial dimensions within tile.
          //for (i=max(Li,max((thyme-k1-k2)*tau-t, 2*t-(2+k1+k2)*tau+2));// rev 298
          //     i<=min(Ui,min((1+thyme-k1-k2)*tau-t-1, 2*t-(k1+k2)*tau)); i++) {
          // for (j=max(Lj,max(tau*k1-t,t-i-(1+k2)*tau+1));
          //      j<=min(Uj,min((1+k1)*tau-t-1,t-i-k2*tau)); j++) {
          for (i = max(Li, max(-2*tau-k1*tau-k2*tau+2*t+2, // from paper
                               (thyme-k1-k2)*tau-t));
               i <= min(Ui, min(tau+(thyme-k1-k2)*tau-t-1, -k1*tau-k2*tau+2*t)); i+=1) {
            for (j = max(Li,max(k1*tau-t, -tau-k2*tau+t-i+1));
                 j <= min(Ui,min(tau+k1*tau-t-1, -k2*tau+t-i)); j+=1) {
              STENCIL( read, write, i, j);
            } // for j
          } // for i
        } // for t
      } // for k2
    } // for k1
  } // for thyme
  double end_time = omp_get_wtime();
  double time = (end_time - start_time);

  // 4 - output and optional verification
  if( cmdLineArgs.printtime ){
    /* printf("Threads: %d, ",cmdLineArgs.cores); printf( "T: %d, ", T ); printf( "N: %d, ", upperBound ); printf( "tau: %d, ", tau ); printf( "num_tiles: %d, ", num_tiles ); */
    printf( "Time: %f", time );
  }
  if( cmdLineArgs.verify ){
    if(!verifyResultJacobi2D(space[cmdLineArgs.T & 1],cmdLineArgs.problemSize, cmdLineArgs.globalSeed,cmdLineArgs.T )){
      fprintf(stderr,"FAILURE\n");
    }else{
      fprintf(stderr,"SUCCESS\n");
    }
  }
}
betweennessCentrality.c
#include "defs.h"

/* Instrumentation hooks: these volatile globals expose the kernel's hot
 * state (current phase, vertex, shared arrays) to an external observer.
 * They are written racily from inside parallel regions by design. */
extern LONG_T *volatile tmp_start;
extern LONG_T *volatile tmp_end;
extern VERT_T *volatile tmp_S;
extern DOUBLE_T *volatile tmp_sig;
extern LONG_T *volatile tmp_d;
extern DOUBLE_T *volatile tmp_del;
extern plist *volatile tmp_P;
extern graph *volatile tmp_G;
extern int volatile push_flag;
extern int volatile g_hot2_j;
extern int volatile g_phase_num;
extern int volatile g_vertex;
extern DOUBLE_T *volatile tmp_BC;

/*
 * Approximate betweenness centrality (SSCA#2 Kernel 4 style).
 *
 * Runs a level-synchronous BFS from up to 2^K4approx source vertices
 * (taken from a random permutation of all vertices), counting shortest
 * paths (sig) and recording predecessors (P), then accumulates path
 * dependencies (del) back down the BFS phases into BC.
 *
 * G  - input graph (n vertices, m edges, CSR arrays numEdges/endV/weight)
 * BC - output array of n centrality scores; incremented per traversal
 *
 * Returns the elapsed wall-clock time of the timed section.
 *
 * BUGFIX: the instrumentation stores `push_flag=1;` / `push_flag=2;` were
 * placed BETWEEN `#pragma omp for` and the associated for-loop.  OpenMP
 * requires the statement following a worksharing-loop construct to be a
 * canonical for-loop, so that did not compile with OpenMP enabled.  The
 * stores are now placed before the pragma (every thread performs them,
 * which is harmless: they store the same constant into a volatile flag).
 */
double betweennessCentrality(graph* G, DOUBLE_T* BC) {

    VERT_T *S;            /* stack of vertices in the order of non-decreasing
                             distance from s. Also used to implicitly
                             represent the BFS queue */
    plist* P;             /* predecessors of a vertex v on shortest paths
                             from s */
    DOUBLE_T* sig;        /* No. of shortest paths */
    LONG_T* d;            /* Length of the shortest path between every pair */
    DOUBLE_T* del;        /* dependency of vertices */
    LONG_T *in_degree, *numEdges, *pSums;
    LONG_T *pListMem;     /* single slab backing all predecessor lists */
    LONG_T* Srcs;         /* permuted source order */
    LONG_T *start, *end;  /* [phase] -> range of S belonging to that phase */
    LONG_T MAX_NUM_PHASES;
    LONG_T *psCount;      /* per-thread prefix counts when merging stacks */
#ifdef _OPENMP
    omp_lock_t* vLock;    /* one lock per vertex */
    LONG_T chunkSize;
#endif
    int seed = 2387;
    double elapsed_time;

#ifdef _OPENMP
#pragma omp parallel
{
#endif
    VERT_T *myS, *myS_t;  /* per-thread stack of newly discovered vertices */
    LONG_T myS_size;
    LONG_T i, j, k, p, count, myCount;
    LONG_T v, w, vert;
    LONG_T numV, num_traversals, n, m, phase_num;
    LONG_T tid, nthreads;
    int* stream;
#ifdef DIAGNOSTIC
    double elapsed_time_part;
#endif

#ifdef _OPENMP
    int myLock;
    tid = omp_get_thread_num();
    nthreads = omp_get_num_threads();
#else
    tid = 0;
    nthreads = 1;
#endif

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds();
    }
#endif

    /* numV: no. of vertices to run BFS from = 2^K4approx */
    numV = 1<<K4approx;
    n = G->n;
    m = G->m;

    /* Permute vertices */
    if (tid == 0) {
        Srcs = (LONG_T *) malloc(n*sizeof(LONG_T));
#ifdef _OPENMP
        vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
    }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
    for (i=0; i<n; i++) {
        omp_init_lock(&vLock[i]);
    }
#endif

    /* Initialize RNG stream */
    stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);

#ifdef _OPENMP
#pragma omp for
#endif
    for (i=0; i<n; i++) {
        Srcs[i] = i;
    }

    /* Random pairwise swaps; test-locks mean contended swaps are simply
     * skipped rather than waited for — the permutation need not be exact. */
#ifdef _OPENMP
#pragma omp for
#endif
    for (i=0; i<n; i++) {
        j = n*sprng(stream);
        if (i != j) {
#ifdef _OPENMP
            int l1 = omp_test_lock(&vLock[i]);
            if (l1) {
                int l2 = omp_test_lock(&vLock[j]);
                if (l2) {
#endif
                    k = Srcs[i];
                    Srcs[i] = Srcs[j];
                    Srcs[j] = k;
#ifdef _OPENMP
                    omp_unset_lock(&vLock[j]);
                }
                omp_unset_lock(&vLock[i]);
            }
#endif
        }
    }

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds() -elapsed_time_part;
        fprintf(stderr, "Vertex ID permutation time: %lf seconds\n", elapsed_time_part);
        elapsed_time_part = get_seconds();
    }
#endif

    /* Start timing code from here */
    if (tid == 0) {
        elapsed_time = get_seconds();
#ifdef VERIFYK4
        MAX_NUM_PHASES = 2*sqrt(n);
#else
        MAX_NUM_PHASES = 50;
#endif
    }

#ifdef _OPENMP
#pragma omp barrier
#endif

    /* Initialize predecessor lists */
    /* The size of the predecessor list of each vertex is bounded by
       its in-degree. So we first compute the in-degree of every vertex */
    if (tid == 0) {
        P = (plist *) calloc(n, sizeof(plist));
        in_degree = (LONG_T *) calloc(n+1, sizeof(LONG_T));
        numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
        pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
    }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
    for (i=0; i<m; i++) {
        v = G->endV[i];
#ifdef _OPENMP
        omp_set_lock(&vLock[v]);
#endif
        in_degree[v]++;
#ifdef _OPENMP
        omp_unset_lock(&vLock[v]);
#endif
    }

    prefix_sums(in_degree, numEdges, pSums, n);

    if (tid == 0) {
        pListMem = (LONG_T *) malloc(m*sizeof(LONG_T));
    }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
    for (i=0; i<n; i++) {
        /* carve vertex i's predecessor list out of the shared slab */
        P[i].list = pListMem + numEdges[i];
        P[i].degree = in_degree[i];
        P[i].count = 0;
    }

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds() - elapsed_time_part;
        fprintf(stderr, "In-degree computation time: %lf seconds\n", elapsed_time_part);
        elapsed_time_part = get_seconds();
    }
#endif

    /* Allocate shared memory */
    if (tid == 0) {
        free(in_degree);
        free(numEdges);
        free(pSums);

        S = (VERT_T *) malloc(n*sizeof(VERT_T));
        sig = (DOUBLE_T *) malloc(n*sizeof(DOUBLE_T));
        d = (LONG_T *) malloc(n*sizeof(LONG_T));
        del = (DOUBLE_T *) calloc(n, sizeof(DOUBLE_T));
        start = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
        end = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
        psCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
    }

    /* local memory for each thread */
    myS_size = (2*n)/nthreads;
    myS = (LONG_T *) malloc(myS_size*sizeof(LONG_T));
    num_traversals = 0;
    myCount = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef _OPENMP
#pragma omp for
#endif
    for (i=0; i<n; i++) {
        d[i] = -1;
    }

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds() -elapsed_time_part;
        fprintf(stderr, "BC initialization time: %lf seconds\n", elapsed_time_part);
        elapsed_time_part = get_seconds();
    }
#endif

    /* Publish shared state for external instrumentation. */
    tmp_start=start;
    tmp_end=end;
    tmp_S=S;
    tmp_d=d;
    tmp_sig=sig;
    tmp_del=del;
    tmp_P=P;
    tmp_G=G;
    tmp_BC=BC;

    for (p=0; p<n; p++) {
        i = Srcs[p];
        /* skip isolated sources; stop after numV real traversals */
        if (G->numEdges[i+1] - G->numEdges[i] == 0) {
            continue;
        } else {
            num_traversals++;
        }

        if (num_traversals == numV + 1) {
            break;
        }

        if (tid == 0) {
            sig[i] = 1;
            d[i] = 0;
            S[0] = i;
            start[0] = 0;
            end[0] = 1;
        }

        count = 1;
        phase_num = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

        /* Forward BFS, one phase (distance level) at a time. */
        while (end[phase_num] - start[phase_num] > 0) {

            myCount = 0;
            /* moved above the pragma: a worksharing `omp for` must be
             * immediately followed by the for-loop it parallelizes */
            push_flag=1;
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(dynamic)
#endif
            for (vert = start[phase_num]; vert < end[phase_num]; vert++) {
                g_vertex=vert;
                v = S[vert];
                for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
#ifndef VERIFYK4
                    /* Filter edges with weights divisible by 8 */
                    if ((G->weight[j] & 7) != 0) {
#endif
                    w = G->endV[j];
                    if (v != w) {
#ifdef _OPENMP
                        myLock = omp_test_lock(&vLock[w]);
                        if (myLock) {
#endif
                            /* w found for the first time? */
                            if (d[w] == -1) {
                                if (myS_size == myCount) {
                                    /* Resize myS */
                                    myS_t = (LONG_T *) malloc(2*myS_size*sizeof(VERT_T));
                                    memcpy(myS_t, myS, myS_size*sizeof(VERT_T));
                                    free(myS);
                                    myS = myS_t;
                                    myS_size = 2*myS_size;
                                }
                                myS[myCount++] = w;
                                d[w] = d[v] + 1;
                                sig[w] = sig[v];
                                P[w].list[P[w].count++] = v;
                            } else if (d[w] == d[v] + 1) {
                                sig[w] += sig[v];
                                P[w].list[P[w].count++] = v;
                            }
#ifdef _OPENMP
                            omp_unset_lock(&vLock[w]);
                        } else {
                            /* lock was contended: block only when the edge
                             * actually matters for this level */
                            if ((d[w] == -1) || (d[w] == d[v]+ 1)) {
                                omp_set_lock(&vLock[w]);
                                sig[w] += sig[v];
                                P[w].list[P[w].count++] = v;
                                omp_unset_lock(&vLock[w]);
                            }
                        }
#endif
                    }
#ifndef VERIFYK4
                    }
#endif
                }
            }
            /* Merge all local stacks for next iteration */
            push_flag=0;
            phase_num++;
            g_phase_num=phase_num;

            psCount[tid+1] = myCount;

#ifdef _OPENMP
#pragma omp barrier
#endif

            if (tid == 0) {
                start[phase_num] = end[phase_num-1];
                psCount[0] = start[phase_num];
                for(k=1; k<=nthreads; k++) {
                    psCount[k] = psCount[k-1] + psCount[k];
                }
                end[phase_num] = psCount[nthreads];
            }

#ifdef _OPENMP
#pragma omp barrier
#endif

            /* copy this thread's discoveries into its slice of S */
            for (k = psCount[tid]; k < psCount[tid+1]; k++) {
                S[k] = myS[k-psCount[tid]];
            }

#ifdef _OPENMP
#pragma omp barrier
#endif
            count = end[phase_num];
        }

        phase_num--;

#ifdef _OPENMP
#pragma omp barrier
#endif
        g_phase_num=phase_num;

        /* Dependency accumulation: walk phases in reverse, pushing each
         * vertex's dependency onto its predecessors, then fold into BC. */
        while (phase_num > 0) {
            /* moved above the pragma — see note in function header */
            push_flag=2;
#ifdef _OPENMP
#pragma omp for
#endif
            for (j=start[phase_num]; j<end[phase_num]; j++) {
                g_hot2_j=j;
                w = S[j];
                for (k = 0; k<P[w].count; k++) {
                    v = P[w].list[k];
#ifdef _OPENMP
                    omp_set_lock(&vLock[v]);
#endif
                    del[v] = del[v] + sig[v]*(1+del[w])/sig[w];
#ifdef _OPENMP
                    omp_unset_lock(&vLock[v]);
#endif
                }
                BC[w] += del[w];
            }
            push_flag=0;

            phase_num--;
            g_phase_num=phase_num;

#ifdef _OPENMP
#pragma omp barrier
#endif
        }

        /* Reset only the vertices touched by this traversal. */
#ifdef _OPENMP
        chunkSize = n/nthreads;
#pragma omp for schedule(static, chunkSize)
#endif
        for (j=0; j<count; j++) {
            w = S[j];
            d[w] = -1;
            del[w] = 0;
            P[w].count = 0;
        }

#ifdef _OPENMP
#pragma omp barrier
#endif
    }

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds() -elapsed_time_part;
        fprintf(stderr, "BC computation time: %lf seconds\n", elapsed_time_part);
    }
#endif

#ifdef _OPENMP
#pragma omp for
    for (i=0; i<n; i++) {
        omp_destroy_lock(&vLock[i]);
    }
#endif

    free(myS);

    if (tid == 0) {
        free(S);
        free(pListMem);
        free(P);
        free(sig);
        free(d);
        free(del);
#ifdef _OPENMP
        free(vLock);
#endif
        free(start);
        free(end);
        free(psCount);
        elapsed_time = get_seconds() - elapsed_time;
        free(Srcs);
    }

    free_sprng(stream);
#ifdef _OPENMP
}
#endif

    /* Verification */
#ifdef VERIFYK4
    /* For the synthetic torus graph every vertex has the same closed-form
       score BCval; check all scores match it (to rounding). */
    double BCval;
    if (SCALE % 2 == 0) {
        BCval = 0.5*pow(2, 3*SCALE/2)-pow(2, SCALE)+1.0;
    } else {
        BCval = 0.75*pow(2, (3*SCALE-1)/2)-pow(2, SCALE)+1.0;
    }
    int failed = 0;
    for (int i=0; i<G->n; i++) {
        if (round(BC[i] - BCval) != 0) {
            failed = 1;
            break;
        }
    }
    if (failed) {
        fprintf(stderr, "Kernel 4 failed validation!\n");
    } else {
        fprintf(stderr, "Kernel 4 validation successful!\n");
    }
#endif

    return elapsed_time;
}
fill_r_3c.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include "config.h"
#include "cint.h"

int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env);

/*
 * out[naoi,naoj,naok,comp] in F-order
 *
 * Fills one (ish,jsh) column of the 3-center integral tensor with no
 * symmetry (s1): for every k-shell in the slice, the integrals are
 * evaluated directly into their destination offset in `out`.
 * shls_slice holds the [start,end) shell ranges for the three centers;
 * ao_loc maps a shell index to its AO offset (so differences give
 * shell dimensions) — assumed from usage; confirm against cint docs.
 */
void GTOr3c_fill_s1(int (*intor)(), double complex *out, double complex *buf,
                    int comp, int ish, int jsh,
                    int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t nij = naoi * naoj;
        const int dims[] = {naoi, naoj, naok};

        /* ish/jsh arrive relative to the slice; make them absolute */
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* base address of this shell-pair's patch in the F-ordered output */
        out += jp * naoi + ip;

        int ksh, k0;
        int shls[3];
        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                /* intor writes directly at the k-offset; buf is scratch */
                (*intor)(out+k0*nij, dims, shls, atm, natm, bas, nbas, env,
                         cintopt, buf);
        }
}

/* Scatter one shell-block `in` (F-order [di,dj] per k,comp) into the
 * packed lower-triangular layout of `out`, for the strictly i>j case.
 * Row i of the block lands ip1+i further along (triangular row stride). */
static void zcopy_s2_igtj(double complex *out, double complex *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double complex *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin = in + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        /* transpose: in is (j-major) */
                                        pout[j] = pin[j*di+i];
                                }
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in += dij * dk;
        }
}

/* Same as zcopy_s2_igtj but for the diagonal shell pair (i==j): only the
 * lower triangle of the block (j <= i) is stored. */
static void zcopy_s2_ieqj(double complex *out, double complex *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double complex *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin = in + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in += dij * dk;
        }
}

/*
 * out[comp,naok,nij] in C-order
 * nij = i1*(i1+1)/2 - i0*(i0+1)/2
 * [ \          ]
 * [****        ]
 * [*****       ]
 * [*****.      ]  <= .  may not be filled, if jsh-upper-bound < ish-upper-bound
 * [        \   ]
 *
 * ij-symmetric (s2ij) variant: integrals are computed into `buf` and then
 * scattered into the packed triangular layout; shell pairs with ip < jp
 * are skipped (covered by symmetry).
 */
void GTOr3c_fill_s2ij(int (*intor)(), double complex *out, double complex *buf,
                      int comp, int ish, int jsh,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        if (ip < jp) {
                /* upper triangle: nothing to do under s2ij symmetry */
                return;
        }

        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int i0 = ao_loc[ish0];
        const int i1 = ao_loc[ish1];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off = i0 * (i0 + 1) / 2;
        const size_t nij = i1 * (i1 + 1) / 2 - off;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];

        /* triangular row offset of (ip,jp) within the packed block */
        out += ip * (ip + 1) / 2 - off + jp;

        int ksh, dk, k0;
        int shls[3];
        /* size buf for the largest possible k-shell; cache memory follows
         * the integral block inside the same allocation */
        dk = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        double *cache = (double *)(buf + di * dj * dk * comp);
        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                         cintopt, cache);
                if (ip != jp) {
                        zcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
                } else {
                        zcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
                }
        }
}

/* jk-symmetry fill: declared for API completeness, intentionally aborts. */
void GTOr3c_fill_s2jk(int (*intor)(), double complex *out, double complex *buf,
                      int comp, int ish, int jsh,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        fprintf(stderr, "GTOr3c_fill_s2jk not implemented\n");
        exit(1);
}

/*
 * Driver: parallelizes the (ish,jsh) shell-pair grid over OpenMP threads,
 * each thread owning one scratch buffer sized for the largest shell block
 * plus the integral cache (cache_size is in doubles; buf elements are
 * double complex, hence cache_size/2).
 * di, cache_size, nish, njsh are const-qualified and therefore
 * predetermined shared under default(none).
 */
void GTOr3c_drv(int (*intor)(), void (*fill)(), double complex *eri, int comp,
                int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(intor, fill, eri, comp, shls_slice, ao_loc, cintopt, \
               atm, natm, bas, nbas, env)
{
        int ish, jsh, ij;
        double complex *buf = malloc(sizeof(double complex) * (di*di*di*comp + cache_size/2));
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                /* flatten the pair index back into (ish, jsh) */
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, buf, comp, ish, jsh,
                        shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
distance.c
/*
 *  R : A Computer Language for Statistical Data Analysis
 *  Copyright (C) 1998-2017   The R Core Team
 *  Copyright (C) 2002-2017   The R Foundation
 *  Copyright (C) 1995, 1996  Robert Gentleman and Ross Ihaka
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, a copy is available at
 *  https://www.R-project.org/Licenses/
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <float.h>
#include <R.h>
#include <Rmath.h>
#include "stats.h"
#ifdef _OPENMP
# include <R_ext/MathThreads.h>
#endif

#define both_FINITE(a,b) (R_FINITE(a) && R_FINITE(b))
#ifdef R_160_and_older
#define both_non_NA both_FINITE
#else
#define both_non_NA(a,b) (!ISNAN(a) && !ISNAN(b))
#endif

/* All distance kernels below compare rows i1 and i2 of the nr x nc
 * column-major matrix x (hence the `+= nr` column stride).  Columns where
 * either value is NA are skipped; if every column is skipped the result
 * is NA.  When only some columns are skipped the accumulated distance is
 * rescaled by nc/count so it remains comparable to complete rows. */

/* Squared-difference (L2) distance. */
static double R_euclidean(double *x, int nr, int nc, int i1, int i2)
{
    double dev, dist;
    int count, j;

    count= 0;
    dist = 0;
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    dev = (x[i1] - x[i2]);
	    if(!ISNAN(dev)) {   /* Inf - Inf gives NaN; skip it too */
		dist += dev * dev;
		count++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    if(count != nc) dist /= ((double)count/nc);
    return sqrt(dist);
}

/* Supremum (L-infinity) distance: largest absolute difference. */
static double R_maximum(double *x, int nr, int nc, int i1, int i2)
{
    double dev, dist;
    int count, j;

    count = 0;
    dist = -DBL_MAX;   /* so any finite deviation replaces it */
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    dev = fabs(x[i1] - x[i2]);
	    if(!ISNAN(dev)) {
		if(dev > dist)
		    dist = dev;
		count++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    /* no count/nc rescale: the maximum is not a sum over columns */
    return dist;
}

/* City-block (L1) distance: sum of absolute differences. */
static double R_manhattan(double *x, int nr, int nc, int i1, int i2)
{
    double dev, dist;
    int count, j;

    count = 0;
    dist = 0;
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    dev = fabs(x[i1] - x[i2]);
	    if(!ISNAN(dev)) {
		dist += dev;
		count++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    if(count != nc) dist /= ((double)count/nc);
    return dist;
}

/* Canberra distance: sum of |a-b| / (|a|+|b|) over non-degenerate columns. */
static double R_canberra(double *x, int nr, int nc, int i1, int i2)
{
    double dev, dist, sum, diff;
    int count, j;

    count = 0;
    dist = 0;
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    sum = fabs(x[i1]) + fabs(x[i2]);
	    diff = fabs(x[i1] - x[i2]);
	    if (sum > DBL_MIN || diff > DBL_MIN) {
		dev = diff/sum;
		/* Inf/Inf gives NaN; when diff == sum == Inf the ratio's
		 * limit is 1, assigned via the comma expression below. */
		if(!ISNAN(dev) ||
		   (!R_FINITE(diff) && diff == sum &&
		    /* use Inf = lim x -> oo */ (dev = 1., TRUE))) {
		    dist += dev;
		    count++;
		}
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    if(count != nc) dist /= ((double)count/nc);
    return dist;
}

/* Binary (asymmetric Jaccard-style) distance: values are treated as
 * zero/nonzero; result = (#columns where exactly one is nonzero) /
 * (#columns where at least one is nonzero).  Non-finite values are
 * warned about and treated as NA. */
static double R_dist_binary(double *x, int nr, int nc, int i1, int i2)
{
    int total, count, dist;
    int j;

    total = 0;
    count = 0;
    dist = 0;

    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    if(!both_FINITE(x[i1], x[i2])) {
		warning(_("treating non-finite values as NA"));
	    }
	    else {
		if(x[i1] != 0. || x[i2] != 0.) {
		    count++;
		    if( ! (x[i1] != 0. && x[i2] != 0.) ) dist++;
		}
		total++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }

    if(total == 0) return NA_REAL;
    if(count == 0) return 0;
    return (double) dist / count;
}

/* Minkowski (Lp) distance with runtime exponent p; extra argument is why
 * it is special-cased rather than dispatched through distfun below. */
static double R_minkowski(double *x, int nr, int nc, int i1, int i2, double p)
{
    double dev, dist;
    int count, j;

    count= 0;
    dist = 0;
    for(j = 0 ; j < nc ; j++) {
	if(both_non_NA(x[i1], x[i2])) {
	    dev = (x[i1] - x[i2]);
	    if(!ISNAN(dev)) {
		dist += R_pow(fabs(dev), p);
		count++;
	    }
	}
	i1 += nr;
	i2 += nr;
    }
    if(count == 0) return NA_REAL;
    if(count != nc) dist /= ((double)count/nc);
    return R_pow(dist, 1.0/p);
}

enum { EUCLIDEAN=1, MAXIMUM, MANHATTAN, CANBERRA, BINARY, MINKOWSKI };
/* == 1,2,..., defined by order in the R function dist */

/* Fill d with the packed lower triangle (column-wise) of the pairwise
 * distance matrix of the rows of x.  diag=1 includes the diagonal.
 * Parallelized over columns when R allows math threads; `ij` is then
 * recomputed per column from the closed-form triangular offset instead
 * of being carried across iterations. */
void R_distance(double *x, int *nr, int *nc, double *d, int *diag,
		int *method, double *p)
{
    int dc, i, j;
    size_t ij;  /* can exceed 2^31 - 1 */
    double (*distfun)(double*, int, int, int, int) = NULL;
#ifdef _OPENMP
    int nthreads;
#endif

    switch(*method) {
    case EUCLIDEAN:
	distfun = R_euclidean;
	break;
    case MAXIMUM:
	distfun = R_maximum;
	break;
    case MANHATTAN:
	distfun = R_manhattan;
	break;
    case CANBERRA:
	distfun = R_canberra;
	break;
    case BINARY:
	distfun = R_dist_binary;
	break;
    case MINKOWSKI:
	if(!R_FINITE(*p) || *p <= 0)
	    error(_("distance(): invalid p"));
	// plus special case below because of extra argument
	break;
    default:
	error(_("distance(): invalid distance"));
    }
    dc = (*diag) ? 0 : 1; /* diag=1:  we do the diagonal */
#ifdef _OPENMP
    if (R_num_math_threads > 0)
	nthreads = R_num_math_threads;
    else
	nthreads = 1; /* for now */
    if (nthreads == 1) {
	/* do the nthreads == 1 case without any OMP overhead to see
	   if it matters on some platforms */
	ij = 0;
	/* j == *nr yields an empty inner loop; bound kept for symmetry
	   with the parallel version */
	for(j = 0 ; j <= *nr ; j++)
	    for(i = j+dc ; i < *nr ; i++)
		d[ij++] = (*method != MINKOWSKI) ?
		    distfun(x, *nr, *nc, i, j) :
		    R_minkowski(x, *nr, *nc, i, j, *p);
    }
    else
	/* This produces uneven thread workloads since the outer loop
	   is over the subdiagonal portions of columns.  An
	   alternative would be to use a loop on ij and to compute the
	   i and j values from ij. */
#pragma omp parallel for num_threads(nthreads) default(none) \
    private(i, j, ij) \
    firstprivate(nr, dc, d, method, distfun, nc, x, p)
	for(j = 0 ; j <= *nr ; j++) {
	    /* start of column j's slice in the packed triangle */
	    ij = j * (*nr - dc) + j - ((1 + j) * j) / 2;
	    for(i = j+dc ; i < *nr ; i++)
		d[ij++] = (*method != MINKOWSKI) ?
		    distfun(x, *nr, *nc, i, j) :
		    R_minkowski(x, *nr, *nc, i, j, *p);
	}
#else
    ij = 0;
    for(j = 0 ; j <= *nr ; j++)
	for(i = j+dc ; i < *nr ; i++)
	    d[ij++] = (*method != MINKOWSKI) ?
		distfun(x, *nr, *nc, i, j) :
		R_minkowski(x, *nr, *nc, i, j, *p);
#endif
}

#include <Rinternals.h>

/* .Call entry point for stats::dist: coerces x to double, allocates the
 * n*(n-1)/2 result vector, computes distances, then copies the supplied
 * attributes (names taken from `attrs`) onto the result. */
SEXP Cdist(SEXP x, SEXP smethod, SEXP attrs, SEXP p)
{
    SEXP ans;
    int nr = nrows(x), nc = ncols(x), method = asInteger(smethod);
    int diag = 0;
    R_xlen_t N;
    double rp = asReal(p);
    N = (R_xlen_t)nr * (nr-1)/2; /* avoid int overflow for N ~ 50,000 */
    PROTECT(ans = allocVector(REALSXP, N));
    if(TYPEOF(x) != REALSXP) x = coerceVector(x, REALSXP);
    PROTECT(x);
    R_distance(REAL(x), &nr, &nc, REAL(ans), &diag, &method, &rp);
    /* tack on attributes */
    SEXP names = getAttrib(attrs, R_NamesSymbol);
    for (int i = 0; i < LENGTH(attrs); i++)
	setAttrib(ans, install(translateChar(STRING_ELT(names, i))),
		  VECTOR_ELT(attrs, i));
    UNPROTECT(2);
    return ans;
}
opencl_office2007_fmt_plug.c
/* MS Office 2007 cracker patch for JtR. Hacked together during March of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com> * * OpenCL support by magnum. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * and Copyright (c) 2012, magnum and it is hereby released to the general public * under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_office2007; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_office2007); #else #include "sha.h" #include "aes.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "unicode.h" #include "common-opencl.h" #include "office_common.h" #include "config.h" #define PLAINTEXT_LENGTH 51 #define UNICODE_LENGTH 104 /* In octets, including 0x80 */ #define FORMAT_LABEL "office2007-opencl" #define FORMAT_NAME "MS Office 2007" #define OCL_ALGORITHM_NAME "SHA1 OpenCL" #define CPU_ALGORITHM_NAME " AES" #define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME #define BENCHMARK_COMMENT " (50,000 iterations)" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_LENGTH 16 #define SALT_SIZE sizeof(*cur_salt) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"$office$*2007*20*128*16*8b2c9e8c878844fc842012273be4bea8*aa862168b80d8c45c852696a8bb499eb*a413507fabe2d87606595f987f679ff4b5b4c2cd", "Password"}, /* 2007-Default_myhovercraftisfullofeels_.docx */ {"$office$*2007*20*128*16*91f095a1fd02595359fe3938fa9236fd*e22668eb1347957987175079e980990f*659f50b9062d36999bf3d0911068c93268ae1d86", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.dotx */ 
{"$office$*2007*20*128*16*56ea65016fbb4eac14a6770b2dbe7e99*8cf82ce1b62f01fd3b2c7666a2313302*21443fe938177e648c482da72212a8848c2e9c80", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.xlsb */ {"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*3a040a9cef3d3675009b22f99718e39c*48053b27e95fa53b3597d48ca4ad41eec382e0c8", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.xlsm */ {"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*92bb2ef34ca662ca8a26c8e2105b05c0*0261ba08cd36a324aa1a70b3908a24e7b5a89dd6", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.xlsx */ {"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*46bef371486919d4bffe7280110f913d*b51af42e6696baa097a7109cebc3d0ff7cc8b1d8", "myhovercraftisfullofeels"}, /* 2007-Default_myhovercraftisfullofeels_.xltx */ {"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*1addb6823689aca9ce400be8f9e55fc9*e06bf10aaf3a4049ffa49dd91cf9e7bbf88a1b3b", "myhovercraftisfullofeels"}, {NULL} }; static ms_office_custom_salt *cur_salt; static ARCH_WORD_32 (*crypt_key)[4]; static char *saved_key; /* Password encoded in UCS-2 */ static int *saved_len; /* UCS-2 password length, in octets */ static char *saved_salt; static unsigned char *key; /* Output key from kernel */ static int new_keys; static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_pwhash, cl_key; static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_key; static cl_kernel GenerateSHA1pwhash, Generate2007key; static struct fmt_main *self; #define HASH_LOOPS 500 /* Lower figure gives less X hogging */ #define ITERATIONS 50000 #define STEP 0 #define SEED 128 static const char * warn[] = { "xfer: ", ", xfer: ", ", init: ", ", loop: ", ", final: ", ", xfer: " }; static int split_events[] = { 3, -1, -1 }; //This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */

/*
 * Upper bound for the local work-group size: the minimum supported by each
 * of the three kernels in the chain on this device.
 */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, GenerateSHA1pwhash);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, Generate2007key));
	return s;
}

/*
 * Allocate device buffers plus page-locked (pinned) host mirrors for the
 * given global work size, map the pinned buffers into host pointers, and
 * bind everything to the kernel arguments. gws is scaled by the vector
 * width first, so all sizes below are in scalar work-items.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	int i;
	int bench_len = strlen(tests[0].plaintext) * 2;

	gws *= ocl_v_width;

	pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, UNICODE_LENGTH * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_key = (char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key");
	memset(saved_key, 0, UNICODE_LENGTH * gws);

	pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(cl_int) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_len = (int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len");
	/* Pre-fill lengths with the self-test password length (in octets) */
	for (i = 0; i < gws; i++)
		saved_len[i] = bench_len;

	pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, SALT_LENGTH, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, SALT_LENGTH, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_salt = (char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, SALT_LENGTH, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt");
	memset(saved_salt, 0, SALT_LENGTH);

	/* Intermediate SHA-1 state kept on device between kernel invocations */
	cl_pwhash = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_uint) * 6 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device state buffer");

	pinned_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 16 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 16 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 16 * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory key");
	memset(key, 0, 16 * gws);

	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 1");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 2, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 2");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 3, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 3");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");

	HANDLE_CLERROR(clSetKernelArg(Generate2007key, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
	HANDLE_CLERROR(clSetKernelArg(Generate2007key, 1, sizeof(cl_mem), (void*)&cl_key), "Error setting argument 1");

	crypt_key = mem_calloc(gws, sizeof(*crypt_key));
}

/*
 * Release everything create_clobj() allocated. crypt_key doubles as the
 * "already allocated" flag, so this is safe to call more than once.
 */
static void release_clobj(void)
{
	if (crypt_key) {
		HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_key, key, 0, NULL, NULL), "Error Unmapping key");
		HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, saved_key, 0, NULL, NULL), "Error Unmapping saved_key");
		HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, NULL, NULL), "Error Unmapping saved_len");
		HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt");
		HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");

		HANDLE_CLERROR(clReleaseMemObject(pinned_key), "Release GPU buffer");
		HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release GPU buffer");
		HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release GPU buffer");
		HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release GPU buffer");
		HANDLE_CLERROR(clReleaseMemObject(cl_key), "Release GPU buffer");
		HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release GPU buffer");
		HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release GPU buffer");
		HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release GPU buffer");
		HANDLE_CLERROR(clReleaseMemObject(cl_pwhash), "Release GPU buffer");

		MEM_FREE(crypt_key);
	}
}

/* Tear down kernels and program once the format is no longer needed. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(GenerateSHA1pwhash), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(Generate2007key), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* Zero the whole host-side key area (all scalar work-items). */
static void clear_keys(void)
{
	memset(saved_key, 0, UNICODE_LENGTH * global_work_size * ocl_v_width);
	memset(saved_len, 0, sizeof(*saved_len) * global_work_size * ocl_v_width);
}

/*
 * Store one candidate password: convert to UTF-16LE, append the 0x80
 * padding byte the kernel expects, and record the length in octets.
 */
static void set_key(char *key, int index)
{
	UTF16 *utfkey = (UTF16*)&saved_key[index * UNICODE_LENGTH];

	/* convert key to UTF-16LE */
	saved_len[index] = enc_to_utf16(utfkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(utfkey);

	/* Prepare for GPU */
	utfkey[saved_len[index]] = 0x80;

	saved_len[index] <<= 1;

	new_keys = 1;
	//dump_stuff_msg("key buffer", &saved_key[index*UNICODE_LENGTH], UNICODE_LENGTH);
}

/* Copy the current salt to the host mirror and push it to the device. */
static void set_salt(void *salt)
{
	cur_salt = (ms_office_custom_salt *)salt;
	memcpy(saved_salt, cur_salt->osalt, SALT_LENGTH);
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, SALT_LENGTH, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt");
}

/*
 * One-time format init: pick the device, adjust the advertised algorithm
 * name for vectorized builds and the plaintext length for UTF-8 input.
 */
static void init(struct fmt_main *_self)
{
	static char valgo[32] = "";

	self = _self;
	opencl_prepare_dev(gpu_id);
	if ((ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int))) > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo), OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
		self->params.algorithm_name = valgo;
	}
	if (options.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}

/* Build the OpenCL program, create the kernels and run the autotuner. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts), "-DHASH_LOOPS=%u -DUNICODE_LENGTH=%u -DV_WIDTH=%u", HASH_LOOPS, UNICODE_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/office2007_kernel.cl", gpu_id, build_opts);

		// create kernel to execute
		GenerateSHA1pwhash = clCreateKernel(program[gpu_id], "GenerateSHA1pwhash", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
		crypt_kernel = clCreateKernel(program[gpu_id], "HashLoop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
		Generate2007key = clCreateKernel(program[gpu_id], "Generate2007key", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 3, self, create_clobj, release_clobj, ocl_v_width * UNICODE_LENGTH, 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, ITERATIONS + 4, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
	}
}

/*
 * Run the full pipeline for *pcount candidates: upload keys if changed,
 * initial SHA-1, 50,000 hash iterations split into HASH_LOOPS-sized kernel
 * calls (keeps the GPU watchdog happy), derive the AES key, read it back,
 * then verify on the CPU.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t gws, scalar_gws;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	gws = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
	scalar_gws = gws * ocl_v_width;

	if (ocl_autotune_running || new_keys) {
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key");
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len");
		new_keys = 0;
	}

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, lws, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel");

	for (index = 0; index < (ocl_autotune_running ? 1 : 50000 / HASH_LOOPS); index++) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel");
		BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
		opencl_process_event();
	}

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2007key, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel");

	// read back aes key
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 16 * scalar_gws, key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (index = 0; index < count; index++)
			ms_office_common_PasswordVerifier(cur_salt, &key[index*16], crypt_key[index]);
	}

	return count;
}

/* Quick scan: does any candidate's first verifier word match the binary? */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++) {
		if ( ((ARCH_WORD_32*)binary)[0] == crypt_key[index][0] )
			return 1;
	}
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}

/* Full check already done in crypt_all()'s CPU verifier */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }

/* Convert the stored UTF-16 candidate back to the user's encoding. */
static char *get_key(int index)
{
	UTF16 buf[PLAINTEXT_LENGTH + 1];

	memcpy(buf, &saved_key[index * UNICODE_LENGTH], saved_len[index]);
	buf[saved_len[index] >> 1] = 0;
	return (char*)utf16_to_enc(buf);
}

struct fmt_main fmt_opencl_office2007 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		ms_office_common_valid_2007,
		fmt_default_split,
		ms_office_common_binary,
		ms_office_common_get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
matrix.h
/***************************************************************************
 *  include/stxxl/bits/containers/matrix.h
 *
 *  Part of the STXXL. See http://stxxl.org
 *
 *  Copyright (C) 2010-2011 Raoul Steffen <R-Steffen@gmx.de>
 *
 *  Distributed under the Boost Software License, Version 1.0.
 *  (See accompanying file LICENSE_1_0.txt or copy at
 *  http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/

#ifndef STXXL_CONTAINERS_MATRIX_HEADER
#define STXXL_CONTAINERS_MATRIX_HEADER

#include <algorithm>
#include <vector>
#include <utility>

#include <tlx/counting_ptr.hpp>
#include <tlx/logger.hpp>

#include <foxxll/mng/block_scheduler.hpp>
#include <stxxl/bits/containers/vector.h>
#include <stxxl/bits/containers/matrix_arithmetic.h>

namespace stxxl {

//! \defgroup matrix matrix
//! Efficient external memory matrix operations
//! \ingroup stlcont
//! \{

/* index-variable naming convention:
 * [MODIFIER_][UNIT_]DIMENSION[_in_[MODIFIER_]ENVIRONMENT]
 *
 * e.g.:
 * block_row = number of row measured in rows consisting of blocks
 * element_row_in_block = number of row measured in rows consisting of elements in the (row of) block(s)
 *
 * size-variable naming convention:
 * [MODIFIER_][ENVIRONMENT_]DIMENSION[_in_UNITs]
 *
 * e.g.
 * height_in_blocks
 */

// forward declaration
template <typename ValueType, unsigned BlockSideLength>
class matrix;

//! external column-vector container for matrix multiplication
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
template <typename ValueType>
class column_vector : public vector<ValueType>
{
public:
    using vector_type = vector<ValueType>;
    using size_type = typename vector_type::size_type;

    using vector_type::size;

    //! \param n number of elements
    explicit column_vector(size_type n = 0)
        : vector_type(n) { }

    //! element-wise sum; both vectors must have equal size
    column_vector operator + (const column_vector& right) const
    {
        assert(size() == right.size());
        column_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] + right[i];
        return res;
    }

    //! element-wise difference; both vectors must have equal size
    column_vector operator - (const column_vector& right) const
    {
        assert(size() == right.size());
        column_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] - right[i];
        return res;
    }

    //! scalar multiple
    column_vector operator * (const ValueType scalar) const
    {
        column_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] * scalar;
        return res;
    }

    column_vector& operator += (const column_vector& right)
    {
        assert(size() == right.size());
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] += right[i];
        return *this;
    }

    column_vector& operator -= (const column_vector& right)
    {
        assert(size() == right.size());
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] -= right[i];
        return *this;
    }

    column_vector& operator *= (const ValueType scalar)
    {
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] *= scalar;
        return *this;
    }

    //! overwrite every element with zero
    void set_zero()
    {
        for (typename vector_type::iterator it = vector_type::begin(); it != vector_type::end(); ++it)
            *it = 0;
    }
};

//! external row-vector container for matrix multiplication
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
template <typename ValueType>
class row_vector : public vector<ValueType>
{
public:
    using vector_type = vector<ValueType>;
    using size_type = typename vector_type::size_type;

    using vector_type::size;

    //! \param n number of elements
    explicit row_vector(size_type n = 0)
        : vector_type(n) { }

    //! element-wise sum; both vectors must have equal size
    row_vector operator + (const row_vector& right) const
    {
        assert(size() == right.size());
        row_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] + right[i];
        return res;
    }

    //! element-wise difference; both vectors must have equal size
    row_vector operator - (const row_vector& right) const
    {
        assert(size() == right.size());
        row_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] - right[i];
        return res;
    }

    //! scalar multiple
    row_vector operator * (const ValueType scalar) const
    {
        row_vector res(size());
        for (size_type i = 0; i < size(); ++i)
            res[i] = (*this)[i] * scalar;
        return res;
    }

    //! vector-matrix product, delegated to the matrix implementation
    template <unsigned BlockSideLength>
    row_vector operator * (const matrix<ValueType, BlockSideLength>& right) const
    { return right.multiply_from_left(*this); }

    //! inner (dot) product with a column vector
    ValueType operator * (const column_vector<ValueType>& right) const
    {
        ValueType res = 0;
        for (size_type i = 0; i < size(); ++i)
            res += (*this)[i] * right[i];
        return res;
    }

    row_vector& operator += (const row_vector& right)
    {
        assert(size() == right.size());
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] += right[i];
        return *this;
    }

    row_vector& operator -= (const row_vector& right)
    {
        assert(size() == right.size());
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] -= right[i];
        return *this;
    }

    row_vector& operator *= (const ValueType scalar)
    {
        for (size_type i = 0; i < size(); ++i)
            (*this)[i] *= scalar;
        return *this;
    }

    //! overwrite every element with zero
    void set_zero()
    {
        for (typename vector_type::iterator it = vector_type::begin(); it != vector_type::end(); ++it)
            *it = 0;
    }
};

//! Specialized swappable_block that interprets uninitialized as containing zeros.
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
//!
//! When initializing, all values are set to zero.
template <typename ValueType, unsigned BlockSideLength>
class matrix_swappable_block : public foxxll::swappable_block<ValueType, BlockSideLength* BlockSideLength>
{
public:
    using internal_block_type = typename foxxll::swappable_block<ValueType, BlockSideLength* BlockSideLength>::internal_block_type;

    using foxxll::swappable_block<ValueType, BlockSideLength* BlockSideLength>::get_internal_block;

    //! Zero-fill the acquired internal block (the "default" contents).
    void fill_default()
    {
        // get_internal_block checks acquired
        internal_block_type& data = get_internal_block();
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
        for (unsigned row = 0; row < BlockSideLength; ++row)
            for (unsigned col = 0; col < BlockSideLength; ++col)
                data[row * BlockSideLength + col] = 0;
    }
};

//! External container for a (sub)matrix. Not intended for direct use.
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
//!
//! Stores blocks only, so all measures (height, width, row, col) are in blocks.
template <typename ValueType, unsigned BlockSideLength>
class swappable_block_matrix : public tlx::reference_counter
{
public:
    using size_type = size_t;
    using elem_size_type = size_t;
    using block_scheduler_type = foxxll::block_scheduler<matrix_swappable_block<ValueType, BlockSideLength> >;
    using swappable_block_identifier_type = typename block_scheduler_type::swappable_block_identifier_type;
    using blocks_type = std::vector<swappable_block_identifier_type>;
    using Ops = matrix_local::matrix_operations<ValueType, BlockSideLength>;

    block_scheduler_type& bs;

private:
    // assigning is not allowed
    swappable_block_matrix& operator = (const swappable_block_matrix& other);

protected:
    //! height of the matrix in blocks
    size_type height,
    //! width of the matrix in blocks
        width,
    //! height copied from supermatrix in blocks
        height_from_supermatrix,
    //! width copied from supermatrix in blocks
        width_from_supermatrix;
    //! the matrice's blocks in row-major
    blocks_type blocks;
    //! if the elements in each block are in col-major instead of row-major
    bool elements_in_blocks_transposed;

    //! get identifier of the block at (row, col)
    swappable_block_identifier_type & bl(const size_type row, const size_type col)
    { return blocks[row * width + col]; }

public:
    //! Create an empty swappable_block_matrix of given dimensions.
    swappable_block_matrix(block_scheduler_type& bs,
                           const size_type height_in_blocks,
                           const size_type width_in_blocks,
                           const bool transposed = false)
        : bs(bs),
          height(height_in_blocks),
          width(width_in_blocks),
          height_from_supermatrix(0),
          width_from_supermatrix(0),
          blocks(height * width),
          elements_in_blocks_transposed(transposed)
    {
        // every block is freshly allocated and thus owned by this matrix
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
    }

    //! Create swappable_block_matrix of given dimensions that
    //! represents the submatrix of supermatrix starting at (from_row_in_blocks, from_col_in_blocks).
    //!
    //! If supermatrix is not large enough, the submatrix is padded with empty blocks.
    //! The supermatrix must not be destructed or transposed before the submatrix is destructed.
    swappable_block_matrix(const swappable_block_matrix& supermatrix,
                           const size_type height_in_blocks, const size_type width_in_blocks,
                           const size_type from_row_in_blocks, const size_type from_col_in_blocks)
        : bs(supermatrix.bs),
          height(height_in_blocks),
          width(width_in_blocks),
          height_from_supermatrix(std::min(supermatrix.height - from_row_in_blocks, height)),
          width_from_supermatrix(std::min(supermatrix.width - from_col_in_blocks, width)),
          blocks(height * width),
          elements_in_blocks_transposed(supermatrix.elements_in_blocks_transposed)
    {
        // blocks inside the supermatrix area are shared (not owned);
        // padding blocks outside it are allocated (owned) here
        for (size_type row = 0; row < height_from_supermatrix; ++row)
        {
            for (size_type col = 0; col < width_from_supermatrix; ++col)
                bl(row, col) = supermatrix.block(row + from_row_in_blocks, col + from_col_in_blocks);
            for (size_type col = width_from_supermatrix; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
        }
        for (size_type row = height_from_supermatrix; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
    }

    //! Create swappable_block_matrix that represents the combination matrix ul ur dl dr.
    //!
    //! The submatrices are assumed to be of fitting dimensions and equal transposition.
    //! The submatrices must not be destructed or transposed before the matrix is destructed.
    swappable_block_matrix(const swappable_block_matrix& ul, const swappable_block_matrix& ur,
                           const swappable_block_matrix& dl, const swappable_block_matrix& dr)
        : bs(ul.bs),
          height(ul.height + dl.height),
          width(ul.width + ur.width),
          height_from_supermatrix(height),
          width_from_supermatrix(width),
          blocks(height * width),
          elements_in_blocks_transposed(ul.elements_in_blocks_transposed)
    {
        // all blocks are shared with the four quadrant matrices; none owned
        for (size_type row = 0; row < ul.height; ++row)
        {
            for (size_type col = 0; col < ul.width; ++col)
                bl(row, col) = ul.block(row, col);
            for (size_type col = ul.width; col < width; ++col)
                bl(row, col) = ur.block(row, col - ul.width);
        }
        for (size_type row = ul.height; row < height; ++row)
        {
            for (size_type col = 0; col < ul.width; ++col)
                bl(row, col) = dl.block(row - ul.height, col);
            for (size_type col = ul.width; col < width; ++col)
                bl(row, col) = dr.block(row - ul.height, col - ul.width);
        }
    }

    //! Deep copy: allocates fresh blocks and copies other's contents into them.
    swappable_block_matrix(const swappable_block_matrix& other)
        : tlx::reference_counter(other),
          bs(other.bs),
          height(other.height),
          width(other.width),
          height_from_supermatrix(0),
          width_from_supermatrix(0),
          blocks(height * width),
          elements_in_blocks_transposed(false)
    {
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
        // 0 + other is copying
        Ops::element_op(*this, other, typename Ops::addition());
    }

    //! Free only the blocks this matrix allocated itself; blocks borrowed
    //! from a supermatrix (the *_from_supermatrix area) stay alive.
    ~swappable_block_matrix()
    {
        for (size_type row = 0; row < height_from_supermatrix; ++row)
        {
            for (size_type col = width_from_supermatrix; col < width; ++col)
                bs.free_swappable_block(bl(row, col));
        }
        for (size_type row = height_from_supermatrix; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bs.free_swappable_block(bl(row, col));
    }

    //! block coordinate that contains the given element coordinate
    static size_type block_index_from_elem(elem_size_type index)
    { return index / BlockSideLength; }

    //! offset of the given element coordinate inside its block (one axis)
    static elem_size_type elem_index_in_block_from_elem(elem_size_type index)
    { return index % BlockSideLength; }

    // regards transposed
    elem_size_type elem_index_in_block_from_elem(elem_size_type row, elem_size_type col) const
    {
        return (is_transposed())
               ? row % BlockSideLength + col % BlockSideLength * BlockSideLength
               : row % BlockSideLength * BlockSideLength + col % BlockSideLength;
    }

    //! get identifier of the block at (row, col)
    const swappable_block_identifier_type & block(const size_type row, const size_type col) const
    { return blocks[row * width + col]; }

    //! get identifier of the block at (row, col)
    const swappable_block_identifier_type& operator () (const size_type row, const size_type col) const
    { return block(row, col); }

    const size_type & get_height() const
    { return height; }

    const size_type & get_width() const
    { return width; }

    //! if the elements inside the blocks are in transposed order i.e. column-major
    const bool & is_transposed() const
    { return elements_in_blocks_transposed; }

    //! Transpose in O(height*width) pointer moves: permute the block grid
    //! and flag the per-block element order as flipped (no element copies).
    void transpose()
    {
        // transpose matrix of blocks
        blocks_type bn(blocks.size());
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bn[col * height + row] = bl(row, col);
        bn.swap(blocks);
        // swap dimensions
        std::swap(height, width);
        std::swap(height_from_supermatrix, width_from_supermatrix);
        elements_in_blocks_transposed = ! elements_in_blocks_transposed;
    }

    //! Logically zero the matrix by deinitializing every block
    //! (uninitialized blocks read as zeros, see matrix_swappable_block).
    void set_zero()
    {
        for (typename blocks_type::iterator it = blocks.begin(); it != blocks.end(); ++it)
            bs.deinitialize(*it);
    }
};

//! general iterator type that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//!
\tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class matrix_iterator { protected: using matrix_type = matrix<ValueType, BlockSideLength>; using swappable_block_matrix_type = typename matrix_type::swappable_block_matrix_type; using block_scheduler_type = typename matrix_type::block_scheduler_type; using internal_block_type = typename block_scheduler_type::internal_block_type; using elem_size_type = typename matrix_type::elem_size_type; using block_size_type = typename matrix_type::block_size_type; template <typename VT, unsigned BSL> friend class matrix; template <typename VT, unsigned BSL> friend class const_matrix_iterator; matrix_type* m; elem_size_type current_row, // \ both indices == -1 <=> empty iterator current_col; // / block_size_type current_block_row, current_block_col; internal_block_type* current_iblock; // nullptr if block is not acquired void acquire_current_iblock() { if (! current_iblock) current_iblock = &m->data->bs.acquire(m->data->block(current_block_row, current_block_col)); } void release_current_iblock() { if (current_iblock) { m->data->bs.release(m->data->block(current_block_row, current_block_col), true); current_iblock = 0; } } //! create iterator pointing to given row and col matrix_iterator(matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : m(&matrix), current_row(start_row), current_col(start_col), current_block_row(m->data->block_index_from_elem(start_row)), current_block_col(m->data->block_index_from_elem(start_col)), current_iblock(0) { } //! 
create empty iterator explicit matrix_iterator(matrix_type& matrix) : m(&matrix), current_row(static_cast<elem_size_type>(-1)), // empty iterator current_col(static_cast<elem_size_type>(-1)), current_block_row(static_cast<block_size_type>(-1)), current_block_col(static_cast<block_size_type>(-1)), current_iblock(0) { } void set_empty() { release_current_iblock(); current_row = static_cast<elem_size_type>(-1); current_col = static_cast<elem_size_type>(-1); current_block_row = static_cast<block_size_type>(-1); current_block_col = static_cast<block_size_type>(-1); } public: matrix_iterator(const matrix_iterator& other) : m(other.m), current_row(other.current_row), current_col(other.current_col), current_block_row(other.current_block_row), current_block_col(other.current_block_col), current_iblock(0) { if (other.current_iblock) acquire_current_iblock(); } matrix_iterator& operator = (const matrix_iterator& other) { set_pos(other.current_row, other.current_col); m = other.m; if (other.current_iblock) acquire_current_iblock(); return *this; } ~matrix_iterator() { release_current_iblock(); } void set_row(const elem_size_type new_row) { const block_size_type new_block_row = m->data->block_index_from_elem(new_row); if (new_block_row != current_block_row) { release_current_iblock(); current_block_row = new_block_row; } current_row = new_row; } void set_col(const elem_size_type new_col) { const block_size_type new_block_col = m->data->block_index_from_elem(new_col); if (new_block_col != current_block_col) { release_current_iblock(); current_block_col = new_block_col; } current_col = new_col; } void set_pos(const elem_size_type new_row, const elem_size_type new_col) { const block_size_type new_block_row = m->data->block_index_from_elem(new_row), new_block_col = m->data->block_index_from_elem(new_col); if (new_block_col != current_block_col || new_block_row != current_block_row) { release_current_iblock(); current_block_row = new_block_row; current_block_col = new_block_col; } 
current_row = new_row; current_col = new_col; } void set_pos(const std::pair<elem_size_type, elem_size_type> new_pos) { set_pos(new_pos.first, new_pos.second); } const elem_size_type & get_row() const { return current_row; } const elem_size_type & get_col() const { return current_col; } std::pair<elem_size_type, elem_size_type> get_pos() const { return std::make_pair(current_row, current_col); } bool empty() const { return current_row == static_cast<elem_size_type>(-1) && current_col == static_cast<elem_size_type>(-1); } operator bool () const { return ! empty(); } bool operator == (const matrix_iterator& other) const { return current_row == other.current_row && current_col == other.current_col && m == other.m; } //! Returns reference access to the element referenced by the iterator. //! The reference is only valid so long as the iterator is not moved. ValueType& operator * () { acquire_current_iblock(); return (*current_iblock)[m->data->elem_index_in_block_from_elem(current_row, current_col)]; } }; //! row-major iterator that points to single elements inside a matrix //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! \tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class matrix_row_major_iterator : public matrix_iterator<ValueType, BlockSideLength> { protected: using matrix_iterator_type = matrix_iterator<ValueType, BlockSideLength>; using matrix_type = typename matrix_iterator_type::matrix_type; using elem_size_type = typename matrix_iterator_type::elem_size_type; template <typename VT, unsigned BSL> friend class matrix; using matrix_iterator_type::m; using matrix_iterator_type::set_empty; //! create iterator pointing to given row and col matrix_row_major_iterator(matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : matrix_iterator_type(matrix, start_row, start_col) { } //! 
create empty iterator explicit matrix_row_major_iterator(matrix_type& matrix) : matrix_iterator_type(matrix) { } public: //! implicit conversion from matrix_iterator matrix_row_major_iterator(const matrix_iterator_type& matrix_iterator) // NOLINT : matrix_iterator_type(matrix_iterator) { } // Has to be not empty, else behavior is undefined. matrix_row_major_iterator& operator ++ () { if (get_col() + 1 < m->get_width()) // => not matrix_row_major_iterator the end of row, move right set_col(get_col() + 1); else if (get_row() + 1 < m->get_height()) // => at end of row but not last row, move to beginning of next row set_pos(get_row() + 1, 0); else // => at end of matrix, set to empty-state set_empty(); return *this; } // Has to be not empty, else behavior is undefined. matrix_row_major_iterator& operator -- () { if (get_col() - 1 >= 0) // => not at the beginning of row, move left set_col(get_col() - 1); else if (get_row() - 1 >= 0) // => at beginning of row but not first row, move to end of previous row set_pos(get_row() - 1, m->get_width() - 1); else // => at beginning of matrix, set to empty-state set_empty(); return *this; } using matrix_iterator_type::get_row; using matrix_iterator_type::get_col; using matrix_iterator_type::set_col; using matrix_iterator_type::set_pos; }; //! column-major iterator that points to single elements inside a matrix //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! 
//! \tparam BlockSideLength side length of a matrix block
template <typename ValueType, unsigned BlockSideLength>
class matrix_col_major_iterator : public matrix_iterator<ValueType, BlockSideLength>
{
protected:
    using matrix_iterator_type = matrix_iterator<ValueType, BlockSideLength>;
    using matrix_type = typename matrix_iterator_type::matrix_type;
    using elem_size_type = typename matrix_iterator_type::elem_size_type;

    template <typename VT, unsigned BSL>
    friend class matrix;

    using matrix_iterator_type::m;
    using matrix_iterator_type::set_empty;

    //! create iterator pointing to given row and col
    matrix_col_major_iterator(matrix_type& matrix,
                              const elem_size_type start_row,
                              const elem_size_type start_col)
        : matrix_iterator_type(matrix, start_row, start_col)
    { }

    //! create empty iterator
    explicit matrix_col_major_iterator(matrix_type& matrix)
        : matrix_iterator_type(matrix)
    { }

public:
    //! implicit conversion from matrix_iterator
    matrix_col_major_iterator(const matrix_iterator_type& matrix_iterator) // NOLINT
        : matrix_iterator_type(matrix_iterator)
    { }

    // Has to be not empty, else behavior is undefined.
    matrix_col_major_iterator& operator ++ ()
    {
        if (get_row() + 1 < m->get_height())
            // => not at the end of col, move down
            set_row(get_row() + 1);
        else if (get_col() + 1 < m->get_width())
            // => at end of col but not last col, move to beginning of next col
            set_pos(0, get_col() + 1);
        else
            // => at end of matrix, set to empty-state
            set_empty();
        return *this;
    }

    // Has to be not empty, else behavior is undefined.
    matrix_col_major_iterator& operator -- ()
    {
        if (get_row() - 1 >= 0)
            // => not at the beginning of col, move up
            set_row(get_row() - 1);
        else if (get_col() - 1 >= 0)
            // => at beginning of col but not first col, move to end of previous col
            set_pos(m->get_height() - 1, get_col() - 1);
        else
            // => at beginning of matrix, set to empty-state
            set_empty();
        return *this;
    }

    using matrix_iterator_type::get_row;
    using matrix_iterator_type::get_col;
    using matrix_iterator_type::set_row;
    using matrix_iterator_type::set_pos;
};

//! general const_iterator type that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
template <typename ValueType, unsigned BlockSideLength>
class const_matrix_iterator
{
protected:
    using matrix_type = matrix<ValueType, BlockSideLength>;
    using swappable_block_matrix_type = typename matrix_type::swappable_block_matrix_type;
    using block_scheduler_type = typename matrix_type::block_scheduler_type;
    using internal_block_type = typename block_scheduler_type::internal_block_type;
    using elem_size_type = typename matrix_type::elem_size_type;
    using block_size_type = typename matrix_type::block_size_type;

    template <typename VT, unsigned BSL>
    friend class matrix;

    const matrix_type* m;
    elem_size_type current_row, // \ both indices == -1 <=> empty iterator
        current_col;            // /
    block_size_type current_block_row, current_block_col;
    internal_block_type* current_iblock; // nullptr if block is not acquired

    //! pin the block containing the current element in internal memory (no-op if already pinned)
    void acquire_current_iblock()
    {
        if (! current_iblock)
            current_iblock = &m->data->bs.acquire(m->data->block(current_block_row, current_block_col));
    }

    //! unpin the current block; second argument false => the block was not modified (const access)
    void release_current_iblock()
    {
        if (current_iblock)
        {
            m->data->bs.release(m->data->block(current_block_row, current_block_col), false);
            current_iblock = 0;
        }
    }

    //!
//! create iterator pointing to given row and col
    const_matrix_iterator(const matrix_type& matrix,
                          const elem_size_type start_row,
                          const elem_size_type start_col)
        : m(&matrix),
          current_row(start_row), current_col(start_col),
          current_block_row(m->data->block_index_from_elem(start_row)),
          current_block_col(m->data->block_index_from_elem(start_col)),
          current_iblock(0)
    { }

    //! create empty iterator
    explicit const_matrix_iterator(const matrix_type& matrix)
        : m(&matrix),
          current_row(-1), // empty iterator
          current_col(-1),
          current_block_row(-1), current_block_col(-1),
          current_iblock(0)
    { }

    //! release any pinned block and mark the iterator as empty/end
    void set_empty()
    {
        release_current_iblock();
        current_row = -1;
        current_col = -1;
        current_block_row = -1;
        current_block_col = -1;
    }

public:
    //! conversion from the mutable matrix_iterator; re-acquires the block if the source had one pinned
    explicit const_matrix_iterator(const matrix_iterator<ValueType, BlockSideLength>& other)
        : m(other.m),
          current_row(other.current_row), current_col(other.current_col),
          current_block_row(other.current_block_row), current_block_col(other.current_block_col),
          current_iblock(0)
    {
        if (other.current_iblock)
            acquire_current_iblock();
    }

    const_matrix_iterator(const const_matrix_iterator& other)
        : m(other.m),
          current_row(other.current_row), current_col(other.current_col),
          current_block_row(other.current_block_row), current_block_col(other.current_block_col),
          current_iblock(0)
    {
        if (other.current_iblock)
            acquire_current_iblock();
    }

    const_matrix_iterator& operator = (const const_matrix_iterator& other)
    {
        // set_pos releases the currently pinned block if the block position changes
        set_pos(other.current_row, other.current_col);
        m = other.m;
        if (other.current_iblock)
            acquire_current_iblock();
        return *this;
    }

    ~const_matrix_iterator()
    { release_current_iblock(); }

    //! move to another row; releases the pinned block only when the block row changes
    void set_row(const elem_size_type new_row)
    {
        const block_size_type new_block_row = m->data->block_index_from_elem(new_row);
        if (new_block_row != current_block_row)
        {
            release_current_iblock();
            current_block_row = new_block_row;
        }
        current_row = new_row;
    }

    //! move to another column; releases the pinned block only when the block column changes
    void set_col(const elem_size_type new_col)
    {
        const block_size_type new_block_col = m->data->block_index_from_elem(new_col);
        if (new_block_col != current_block_col)
        {
            release_current_iblock();
            current_block_col = new_block_col;
        }
        current_col = new_col;
    }

    //! move to another (row, col); releases the pinned block only when the containing block changes
    void set_pos(const elem_size_type new_row, const elem_size_type new_col)
    {
        const block_size_type new_block_row = m->data->block_index_from_elem(new_row),
            new_block_col = m->data->block_index_from_elem(new_col);
        if (new_block_col != current_block_col || new_block_row != current_block_row)
        {
            release_current_iblock();
            current_block_row = new_block_row;
            current_block_col = new_block_col;
        }
        current_row = new_row;
        current_col = new_col;
    }

    void set_pos(const std::pair<elem_size_type, elem_size_type> new_pos)
    { set_pos(new_pos.first, new_pos.second); }

    const elem_size_type & get_row() const
    { return current_row; }

    const elem_size_type & get_col() const
    { return current_col; }

    std::pair<elem_size_type, elem_size_type> get_pos() const
    { return std::make_pair(current_row, current_col); }

    //! true iff the iterator is in the empty/end state (both indices == -1)
    bool empty() const
    { return current_row == static_cast<elem_size_type>(-1) && current_col == static_cast<elem_size_type>(-1); }

    operator bool () const
    { return ! empty(); }

    bool operator == (const const_matrix_iterator& other) const
    {
        return current_row == other.current_row && current_col == other.current_col && m == other.m;
    }

    //! Returns reference access to the element referenced by the iterator.
    //! The reference is only valid so long as the iterator is not moved.
    const ValueType& operator * ()
    {
        acquire_current_iblock();
        return (*current_iblock)[m->data->elem_index_in_block_from_elem(current_row, current_col)];
    }
};

//! row-major const_iterator that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//!
//! \tparam BlockSideLength side length of a matrix block
template <typename ValueType, unsigned BlockSideLength>
class const_matrix_row_major_iterator : public const_matrix_iterator<ValueType, BlockSideLength>
{
protected:
    using const_matrix_iterator_type = const_matrix_iterator<ValueType, BlockSideLength>;
    using matrix_type = typename const_matrix_iterator_type::matrix_type;
    using elem_size_type = typename const_matrix_iterator_type::elem_size_type;

    template <typename VT, unsigned BSL>
    friend class matrix;

    using const_matrix_iterator_type::m;
    using const_matrix_iterator_type::set_empty;

    //! create iterator pointing to given row and col
    const_matrix_row_major_iterator(const matrix_type& matrix,
                                    const elem_size_type start_row,
                                    const elem_size_type start_col)
        : const_matrix_iterator_type(matrix, start_row, start_col)
    { }

    //! create empty iterator
    explicit const_matrix_row_major_iterator(const matrix_type& matrix)
        : const_matrix_iterator_type(matrix)
    { }

public:
    //! copy constructor
    const_matrix_row_major_iterator(const const_matrix_row_major_iterator& matrix_iterator)
        : const_matrix_iterator_type(matrix_iterator)
    { }

    //! implicit conversion from matrix_iterator
    const_matrix_row_major_iterator(const const_matrix_iterator_type& matrix_iterator) // NOLINT
        : const_matrix_iterator_type(matrix_iterator)
    { }

    // Has to be not empty, else behavior is undefined.
    const_matrix_row_major_iterator& operator ++ ()
    {
        if (get_col() + 1 < m->get_width())
            // => not at the end of row, move right
            set_col(get_col() + 1);
        else if (get_row() + 1 < m->get_height())
            // => at end of row but not last row, move to beginning of next row
            set_pos(get_row() + 1, 0);
        else
            // => at end of matrix, set to empty-state
            set_empty();
        return *this;
    }

    // Has to be not empty, else behavior is undefined.
    const_matrix_row_major_iterator& operator -- ()
    {
        if (get_col() - 1 >= 0)
            // => not at the beginning of row, move left
            set_col(get_col() - 1);
        else if (get_row() - 1 >= 0)
            // => at beginning of row but not first row, move to end of previous row
            set_pos(get_row() - 1, m->get_width() - 1);
        else
            // => at beginning of matrix, set to empty-state
            set_empty();
        return *this;
    }

    using const_matrix_iterator_type::get_row;
    using const_matrix_iterator_type::get_col;
    using const_matrix_iterator_type::set_col;
    using const_matrix_iterator_type::set_pos;
};

//! column-major const_iterator that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
template <typename ValueType, unsigned BlockSideLength>
class const_matrix_col_major_iterator : public const_matrix_iterator<ValueType, BlockSideLength>
{
protected:
    using const_matrix_iterator_type = const_matrix_iterator<ValueType, BlockSideLength>;
    using matrix_type = typename const_matrix_iterator_type::matrix_type;
    using elem_size_type = typename const_matrix_iterator_type::elem_size_type;

    template <typename VT, unsigned BSL>
    friend class matrix;

    using const_matrix_iterator_type::m;
    using const_matrix_iterator_type::set_empty;

    //! create iterator pointing to given row and col
    const_matrix_col_major_iterator(const matrix_type& matrix,
                                    const elem_size_type start_row,
                                    const elem_size_type start_col)
        : const_matrix_iterator_type(matrix, start_row, start_col)
    { }

    //! create empty iterator
    explicit const_matrix_col_major_iterator(const matrix_type& matrix)
        : const_matrix_iterator_type(matrix)
    { }

public:
    //! implicit conversion from matrix_iterator
    const_matrix_col_major_iterator( // NOLINT
        const matrix_iterator<ValueType, BlockSideLength>& matrix_iterator) // NOLINT
        : const_matrix_iterator_type(matrix_iterator)
    { }

    //!
implicit conversion from matrix_iterator const_matrix_col_major_iterator( // NOLINT const const_matrix_iterator_type& matrix_iterator) // NOLINT : const_matrix_iterator_type(matrix_iterator) { } // Has to be not empty, else behavior is undefined. const_matrix_col_major_iterator& operator ++ () { if (get_row() + 1 < m->get_height()) // => not at the end of col, move down set_row(get_row() + 1); else if (get_col() + 1 < m->get_width()) // => at end of col but not last col, move to beginning of next col set_pos(0, get_col() + 1); else // => at end of matrix, set to empty-state set_empty(); return *this; } // Has to be not empty, else behavior is undefined. const_matrix_col_major_iterator& operator -- () { if (get_row() - 1 >= 0) // => not at the beginning of col, move up set_row(get_row() - 1); else if (get_col() - 1 >= 0) // => at beginning of col but not first col, move to end of previous col set_pos(m->get_height() - 1, get_col() - 1); else // => at beginning of matrix, set to empty-state set_empty(); return *this; } using const_matrix_iterator_type::get_row; using const_matrix_iterator_type::get_col; using const_matrix_iterator_type::set_row; using const_matrix_iterator_type::set_pos; }; //! External matrix container. \n //! <b> Introduction </b> to matrix container: see \ref tutorial_matrix tutorial. \n //! <b> Design and Internals </b> of matrix container: see \ref design_matrix. //! //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! \tparam BlockSideLength side length of a matrix block //! //! Divides the matrix in square submatrices (blocks). //! Blocks can be swapped individually to and from external memory. //! They are only swapped if necessary to minimize I/O. 
template <typename ValueType, unsigned BlockSideLength>
class matrix
{
protected:
    using matrix_type = matrix<ValueType, BlockSideLength>;
    using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>;
    using swappable_block_matrix_pointer_type = tlx::counting_ptr<swappable_block_matrix_type>;
    using block_scheduler_type = typename swappable_block_matrix_type::block_scheduler_type;
    using block_size_type = typename swappable_block_matrix_type::size_type;
    using elem_size_type = typename swappable_block_matrix_type::elem_size_type;
    using Ops = matrix_local::matrix_operations<ValueType, BlockSideLength>;
    using swappable_block_type = matrix_swappable_block<ValueType, BlockSideLength>;

public:
    using iterator = matrix_iterator<ValueType, BlockSideLength>;
    using const_iterator = const_matrix_iterator<ValueType, BlockSideLength>;
    using row_major_iterator = matrix_row_major_iterator<ValueType, BlockSideLength>;
    using col_major_iterator = matrix_col_major_iterator<ValueType, BlockSideLength>;
    using const_row_major_iterator = const_matrix_row_major_iterator<ValueType, BlockSideLength>;
    using const_col_major_iterator = const_matrix_col_major_iterator<ValueType, BlockSideLength>;
    using column_vector_type = column_vector<ValueType>;
    using row_vector_type = row_vector<ValueType>;

protected:
    template <typename VT, unsigned BSL>
    friend class matrix_iterator;
    template <typename VT, unsigned BSL>
    friend class const_matrix_iterator;

    elem_size_type height, width;
    // reference-counted block data; shared between copies until a mutating
    // operation calls data.unify() (copy-on-write semantics)
    swappable_block_matrix_pointer_type data;

public:
    //! \name Constructors/Destructors
    //! \{

    //! Creates a new matrix of given dimensions. Elements' values are set to zero.
    //! \param bs block scheduler used
    //! \param height height of the created matrix
    //! \param width width of the created matrix
    matrix(block_scheduler_type& bs, const elem_size_type height, const elem_size_type width)
        : height(height), width(width),
          data(
              new swappable_block_matrix_type(
                  bs,
                  foxxll::div_ceil(height, BlockSideLength),
                  foxxll::div_ceil(width, BlockSideLength))
              )
    { }

    //! Creates the outer-product matrix left * right.
    matrix(block_scheduler_type& bs, const column_vector_type& left, const row_vector_type& right)
        : height(static_cast<elem_size_type>(left.size())),
          width(static_cast<elem_size_type>(right.size())),
          data(
              new swappable_block_matrix_type(
                  bs,
                  foxxll::div_ceil(height, BlockSideLength),
                  foxxll::div_ceil(width, BlockSideLength))
              )
    {
        Ops::recursive_matrix_from_vectors(*data, left, right);
    }

    ~matrix() { }

    //! \}

    //! \name Capacity
    //! \{

    const elem_size_type & get_height() const
    { return height; }

    const elem_size_type & get_width() const
    { return width; }

    //! \}

    //! \name Iterators
    //! \{

    // non-const accessors call data.unify() first to get a private copy
    // (the iterator may be used to modify elements)
    iterator begin()
    {
        data.unify();
        return iterator(*this, 0, 0);
    }
    const_iterator begin() const
    { return const_iterator(*this, 0, 0); }
    const_iterator cbegin() const
    { return const_iterator(*this, 0, 0); }

    iterator end()
    {
        data.unify();
        return iterator(*this);
    }
    const_iterator end() const
    { return const_iterator(*this); }
    const_iterator cend() const
    { return const_iterator(*this); }

    //! iterator positioned at (row, col)
    const_iterator operator () (const elem_size_type row, const elem_size_type col) const
    { return const_iterator(*this, row, col); }

    iterator operator () (const elem_size_type row, const elem_size_type col)
    {
        data.unify();
        return iterator(*this, row, col);
    }

    //! \}

    //! \name Modifiers
    //! \{

    void transpose()
    {
        data.unify();
        data->transpose();
        std::swap(height, width);
    }

    void set_zero()
    {
        if (data.unique())
            data->set_zero();
        else
            // shared: cheaper to drop our reference and allocate a fresh zero matrix
            data = tlx::make_counting<swappable_block_matrix_type>(
                data->bs,
                foxxll::div_ceil(height, BlockSideLength),
                foxxll::div_ceil(width, BlockSideLength));
    }

    //! \}

    //! \name Operations
    //!
//! \{

    matrix_type operator + (const matrix_type& right) const
    {
        assert(height == right.height && width == right.width);
        matrix_type res(data->bs, height, width);
        Ops::element_op(*res.data, *data, *right.data, typename Ops::addition()); // more efficient than copying this and then adding right
        return res;
    }

    matrix_type operator - (const matrix_type& right) const
    {
        assert(height == right.height && width == right.width);
        matrix_type res(data->bs, height, width);
        Ops::element_op(*res.data, *data, *right.data, typename Ops::subtraction()); // more efficient than copying this and then subtracting right
        return res;
    }

    matrix_type operator * (const matrix_type& right) const
    { return multiply(right); }

    matrix_type operator * (const ValueType scalar) const
    {
        matrix_type res(data->bs, height, width);
        Ops::element_op(*res.data, *data, typename Ops::scalar_multiplication(scalar));
        return res;
    }

    matrix_type& operator += (const matrix_type& right)
    {
        assert(height == right.height && width == right.width);
        data.unify();
        Ops::element_op(*data, *right.data, typename Ops::addition());
        return *this;
    }

    matrix_type& operator -= (const matrix_type& right)
    {
        assert(height == right.height && width == right.width);
        data.unify();
        Ops::element_op(*data, *right.data, typename Ops::subtraction());
        return *this;
    }

    matrix_type& operator *= (const matrix_type& right)
    { return *this = operator * (right); } // implicitly unifies by constructing a result-matrix

    matrix_type& operator *= (const ValueType scalar)
    {
        data.unify();
        Ops::element_op(*data, typename Ops::scalar_multiplication(scalar));
        return *this;
    }

    //! matrix * column-vector
    column_vector_type operator * (const column_vector_type& right) const
    {
        assert(elem_size_type(right.size()) == width);
        column_vector_type res(height);
        res.set_zero();
        Ops::recursive_matrix_col_vector_multiply_and_add(*data, right, res);
        return res;
    }

    //! row-vector * matrix
    row_vector_type multiply_from_left(const row_vector_type& left) const
    {
        assert(elem_size_type(left.size()) == height);
        row_vector_type res(width);
        res.set_zero();
        Ops::recursive_matrix_row_vector_multiply_and_add(left, *data, res);
        return res;
    }

    //! multiply with another matrix
    //! \param right matrix to multiply with
    //! \param multiplication_algorithm allows to choose the applied algorithm
    //! \param scheduling_algorithm allows to choose the applied algorithm
    //!
    //! Available algorithms are: \n
    //! 0: naive_multiply_and_add (I/O inefficient, slow) \n
    //! 1: recursive_multiply_and_add (recommended, default, stable time and I/O complexity) \n
    //! 2: strassen_winograd_multiply_and_add (sometimes fast but unstable time and I/O complexity) \n
    //! 3: multi_level_strassen_winograd_multiply_and_add (sometimes fast but unstable time and I/O complexity) \n
    //! 4: strassen_winograd_multiply, optimized pre- and postadditions (sometimes fast but unstable time and I/O complexity) \n
    //! 5: strassen_winograd_multiply_and_add_interleaved, optimized preadditions (sometimes fast but unstable time and I/O complexity) \n
    //! 6: multi_level_strassen_winograd_multiply_and_add_block_grained (sometimes fast but unstable time and I/O complexity)
    matrix_type multiply(const matrix_type& right, const int multiplication_algorithm = 1,
                         const int scheduling_algorithm = 2) const
    {
        assert(width == right.height);
        assert(&data->bs == &right.data->bs);
        matrix_type res(data->bs, height, right.width);

        if (scheduling_algorithm > 0)
        {
            // all offline algos need a simulation-run
            delete data->bs.switch_algorithm_to(
                new foxxll::block_scheduler_algorithm_simulation<swappable_block_type>(data->bs));
            switch (multiplication_algorithm)
            {
            case 0:
                Ops::naive_multiply_and_add(*data, *right.data, *res.data);
                break;
            case 1:
                Ops::recursive_multiply_and_add(*data, *right.data, *res.data);
                break;
            case 2:
                Ops::strassen_winograd_multiply_and_add(*data, *right.data, *res.data);
                break;
            case 3:
                Ops::multi_level_strassen_winograd_multiply_and_add(*data, *right.data, *res.data);
                break;
            case 4:
                Ops::strassen_winograd_multiply(*data, *right.data, *res.data);
                break;
            case 5:
                Ops::strassen_winograd_multiply_and_add_interleaved(*data, *right.data, *res.data);
                break;
            case 6:
                Ops::multi_level_strassen_winograd_multiply_and_add_block_grained(*data, *right.data, *res.data);
                break;
            default:
                LOG1 << "invalid multiplication-algorithm number";
                break;
            }
        }
        // select the scheduling algorithm for the real (non-simulated) run
        switch (scheduling_algorithm)
        {
        case 0:
            delete data->bs.switch_algorithm_to(
                new foxxll::block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs));
            break;
        case 1:
            delete data->bs.switch_algorithm_to(
                new foxxll::block_scheduler_algorithm_offline_lfd<swappable_block_type>(data->bs));
            break;
        case 2:
            delete data->bs.switch_algorithm_to(
                new foxxll::block_scheduler_algorithm_offline_lru_prefetching<swappable_block_type>(data->bs));
            break;
        default:
            LOG1 << "invalid scheduling-algorithm number";
        }
        // the actual multiplication
        switch (multiplication_algorithm)
        {
        case 0:
            Ops::naive_multiply_and_add(*data, *right.data, *res.data);
            break;
        case 1:
            Ops::recursive_multiply_and_add(*data, *right.data, *res.data);
            break;
        case 2:
            Ops::strassen_winograd_multiply_and_add(*data, *right.data, *res.data);
            break;
        case 3:
            Ops::multi_level_strassen_winograd_multiply_and_add(*data, *right.data, *res.data);
            break;
        case 4:
            Ops::strassen_winograd_multiply(*data, *right.data, *res.data);
            break;
        case 5:
            Ops::strassen_winograd_multiply_and_add_interleaved(*data, *right.data, *res.data);
            break;
        case 6:
            Ops::multi_level_strassen_winograd_multiply_and_add_block_grained(*data, *right.data, *res.data);
            break;
        default:
            LOG1 << "invalid multiplication-algorithm number";
            break;
        }
        // restore the default online scheduler
        delete data->bs.switch_algorithm_to(
            new foxxll::block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs));
        return res;
    }

    //! Use internal memory multiplication. Designated for testing. May exceed memory limitations.
matrix_type multiply_internal(const matrix_type& right, const int scheduling_algorithm = 2) const
    {
        assert(width == right.height);
        assert(&data->bs == &right.data->bs);
        matrix_type res(data->bs, height, right.width);

        if (scheduling_algorithm > 0)
        {
            // all offline algos need a simulation-run
            delete data->bs.switch_algorithm_to(
                new foxxll::block_scheduler_algorithm_simulation<swappable_block_type>(data->bs));
            multiply_internal(right, res);
        }
        // select the scheduling algorithm for the real run
        switch (scheduling_algorithm)
        {
        case 0:
            delete data->bs.switch_algorithm_to(
                new foxxll::block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs));
            break;
        case 1:
            delete data->bs.switch_algorithm_to(
                new foxxll::block_scheduler_algorithm_offline_lfd<swappable_block_type>(data->bs));
            break;
        case 2:
            delete data->bs.switch_algorithm_to(
                new foxxll::block_scheduler_algorithm_offline_lru_prefetching<swappable_block_type>(data->bs));
            break;
        default:
            LOG1 << "invalid scheduling-algorithm number";
        }
        multiply_internal(right, res);
        // restore the default online scheduler
        delete data->bs.switch_algorithm_to(
            new foxxll::block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs));
        return res;
    }

    //! \}

protected:
    //! Copies both operands into internal-memory arrays, multiplies via BLAS
    //! (if STXXL_BLAS is enabled) and copies the product back into res.
    void multiply_internal(const matrix_type& right, matrix_type& res) const
    {
        ValueType* A = new ValueType[height * width];
        ValueType* B = new ValueType[right.height * right.width];
        ValueType* C = new ValueType[res.height * res.width];
        ValueType* vit;
        vit = A;
        for (const_row_major_iterator mit = cbegin(); mit != cend(); ++mit, ++vit)
            *vit = *mit;
        vit = B;
        for (const_row_major_iterator mit = right.cbegin(); mit != right.cend(); ++mit, ++vit)
            *vit = *mit;
        if (! res.data->bs.is_simulating())
        {
#if STXXL_BLAS
            gemm_wrapper(height, width, res.width,
                         ValueType(1), false, A,
                         false, B,
                         ValueType(0), false, C);
#else
            assert(false /* internal multiplication is only available for testing with blas */);
#endif
        }
        vit = C;
        for (row_major_iterator mit = res.begin(); mit != res.end(); ++mit, ++vit)
            *mit = *vit;

        delete[] A;
        delete[] B;
        delete[] C;
    }
};

//! \}

} // namespace stxxl

#endif // !STXXL_CONTAINERS_MATRIX_HEADER
// ==== updater_basemaker-inl.h ====
/*!
 * Copyright 2014-2022 by XGBoost Contributors
 * \file updater_basemaker-inl.h
 * \brief implement a common tree constructor
 * \author Tianqi Chen
 */
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_

#include <rabit/rabit.h>

#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>

#include "xgboost/base.h"
#include "xgboost/json.h"
#include "xgboost/tree_updater.h"

#include "param.h"
#include "constraints.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
#include "../common/threading_utils.h"

namespace xgboost {
namespace tree {
/*!
 * \brief base tree maker class that defines common operation
 *  needed in tree making
 */
class BaseMaker: public TreeUpdater {
 public:
  void Configure(const Args& args) override {
    param_.UpdateAllowUnknown(args);
  }

  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    FromJson(config.at("train_param"), &this->param_);
  }
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["train_param"] = ToJson(param_);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    inline void InitByCol(DMatrix* p_fmat,
                          const RegTree& tree) {
      // fminmax_ stores per feature: [fid*2] = max(-fvalue) (i.e. -min),
      // [fid*2+1] = max(fvalue); both initialized to -FLT_MAX (= "empty")
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
        auto page = batch.GetView();
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = page[fid];
          if (c.size() != 0) {
            CHECK_LT(fid * 2, fminmax_.size());
            // columns are sorted, so c[0] is the minimum and c[size-1] the maximum
            fminmax_[fid * 2 + 0] =
                std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] =
                std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceed query bound ";
      bst_float a = fminmax_[fid * 2];
      bst_float b = fminmax_[fid * 2 + 1];
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      // -a == b means min == max, i.e. the feature takes a single value
      if (-a == b) {
        return 1;
      } else {
        return 2;
      }
    }
    bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid * 2 + 1];
    }
    // sample a fraction p of the non-empty features into *p_findex;
    // the result is broadcast from rank 0 so all workers agree
    void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const {
      std::vector<bst_feature_t> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief this is helper function for row based data*/
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    // linear scan for the split feature; missing => default child
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*!
\brief initialize temp data structure */ inline void InitData(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree) { { // setup position position_.resize(gpair.size()); std::fill(position_.begin(), position_.end(), 0); // mark delete for the deleted datas for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i]; } // mark subsample if (param_.subsample < 1.0f) { CHECK_EQ(param_.sampling_method, TrainParam::kUniform) << "Only uniform sampling is supported, " << "gradient-based sampling is only support by GPU Hist."; std::bernoulli_distribution coin_flip(param_.subsample); auto& rnd = common::GlobalRandom(); for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) continue; if (!coin_flip(rnd)) position_[i] = ~position_[i]; } } } { // expand query qexpand_.reserve(256); qexpand_.clear(); qexpand_.push_back(0); this->UpdateNode2WorkIndex(tree); } this->interaction_constraints_.Configure(param_, fmat.Info().num_col_); } /*! \brief update queue expand add in new leaves */ inline void UpdateQueueExpand(const RegTree &tree) { std::vector<int> newnodes; for (int nid : qexpand_) { if (!tree[nid].IsLeaf()) { newnodes.push_back(tree[nid].LeftChild()); newnodes.push_back(tree[nid].RightChild()); } } // use new nodes for qexpand qexpand_ = newnodes; this->UpdateNode2WorkIndex(tree); } // return decoded position inline int DecodePosition(bst_uint ridx) const { const int pid = position_[ridx]; return pid < 0 ? ~pid : pid; } // encode the encoded position value for ridx inline void SetEncodePosition(bst_uint ridx, int nid) { if (position_[ridx] < 0) { position_[ridx] = ~nid; } else { position_[ridx] = nid; } } /*! 
* \brief This is a helper function that uses a column based data structure
   *  and reset the positions to the latest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat,
                               const RegTree &tree) {
    // set the positions in the nondefault
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) {
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark then to ~nid,
    // so that they are ignored in future statistics collection
    common::ParallelFor(p_fmat->Info().num_row_, ctx_->Threads(), [&](auto ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    });
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  to CORRECT the positions of non-default directions that WAS set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The set of index that contains split solutions.
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    auto page = batch.GetView();
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = page[fid];
      // only process columns whose feature index is in the (sorted) split set
      auto it = std::lower_bound(sorted_split_set.begin(),
                                 sorted_split_set.end(), fid);
      if (it != sorted_split_set.end() && *it == fid) {
        common::ParallelFor(col.size(), ctx_->Threads(), [&](auto j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();
          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        });
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   * \param nodes the set of nodes that contains the split to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    // deduplicate the split feature indices
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  update all positions into nondefault branch, if any, ignore the default branch
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat, const RegTree &tree) {
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);
    for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
      auto page = batch.GetView();
      for (auto fid : fsplits) {
        auto col = page[fid];
        common::ParallelFor(col.size(), ctx_->Threads(), [&](auto j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // go back to parent, correct those who are not default
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        });
      }
    }
  }
  /*!
 \brief helper function to get per-node gradient statistics from the tree */
template<typename TStats>
inline void GetNodeStats(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree, std::vector< std::vector<TStats> > *p_thread_temp, std::vector<TStats> *p_node_stats) {
  std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
  // One scratch accumulator vector per worker thread.
  thread_temp.resize(ctx_->Threads());
  p_node_stats->resize(tree.param.num_nodes);
  dmlc::OMPException exc;
#pragma omp parallel num_threads(ctx_->Threads())
  {
    exc.Run([&]() {
      const int tid = omp_get_thread_num();
      // Size each thread's scratch to the full node count, then zero the
      // entries for the nodes being expanded this round.
      thread_temp[tid].resize(tree.param.num_nodes, TStats());
      for (unsigned int nid : qexpand_) {
        thread_temp[tid][nid] = TStats();
      }
    });
  }
  exc.Rethrow();
  // Accumulate each row's gradient pair into its node's per-thread bucket;
  // negative positions mean the row is no longer expanding and is skipped.
  common::ParallelFor(fmat.Info().num_row_, ctx_->Threads(), [&](auto ridx) {
    const int nid = position_[ridx];
    const int tid = omp_get_thread_num();
    if (nid >= 0) {
      thread_temp[tid][nid].Add(gpair[ridx]);
    }
  });
  // Reduce the per-thread partial statistics into the output, per node.
  for (int nid : qexpand_) {
    TStats &s = (*p_node_stats)[nid];
    s = TStats();
    for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
      s.Add(thread_temp[tid][nid]);
    }
  }
}
using SketchEntry = common::SortedQuantile;
/*! \brief training parameter of tree grower */
TrainParam param_;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand_;
/*!
 * \brief map active node to its working index offset in qexpand,
 * can be -1, which means the node is not actively expanding
 */
std::vector<int> node2workindex_;
/*!
* \brief position of each instance in the tree * can be negative, which means this position is no longer expanding * see also Decode/EncodePosition */ std::vector<int> position_; FeatureInteractionConstraintHost interaction_constraints_; private: inline void UpdateNode2WorkIndex(const RegTree &tree) { // update the node2workindex std::fill(node2workindex_.begin(), node2workindex_.end(), -1); node2workindex_.resize(tree.param.num_nodes); for (size_t i = 0; i < qexpand_.size(); ++i) { node2workindex_[qexpand_[i]] = static_cast<int>(i); } } }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
dense_distances.c
/* Generated by Cython 0.15.1 on Fri Jan 6 16:49:30 2012 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #else #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #if PY_VERSION_HEX < 0x02040000 #define METH_COEXIST 0 #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) #define PyDict_Contains(d,o) PySequence_Contains(d,o) #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) PyNumber_Int(o) #define PyIndex_Check(o) PyNumber_Check(o) #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define 
PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) 
PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__dense_distances #define __PYX_HAVE_API__dense_distances #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined 
(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || defined(__INTEL_COMPILER) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... 
*/ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "dense_distances.pyx", "numpy.pxd", }; /* "numpy.pxd":719 * # in Cython to enable them only on the right systems. * * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":720 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":721 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":722 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":726 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":727 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # 
<<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":728 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":729 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":733 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":734 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":743 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":744 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":745 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":747 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":748 * * 
ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":749 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":751 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":752 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":754 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":755 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":756 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "dense_distances.pyx":12 * DTYPE = np.float * # corresponding compile-time type * ctypedef np.float_t DTYPE_t # <<<<<<<<<<<<<< * * */ typedef __pyx_t_5numpy_float_t __pyx_t_15dense_distances_DTYPE_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } 
__pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":758 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":759 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":760 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":762 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) 
__Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ /* Run-time type information about structs used with buffers */ struct __Pyx_StructField_; typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */ } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* 
buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) #include <string.h> void __pyx_init_nan(void); static float __PYX_NAN; static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif Py_ssize_t __Pyx_zeros[] = {0, 0}; Py_ssize_t __Pyx_minusones[] = {-1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) 
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define 
/*
 * NOTE(review): Cython-generated C for module "dense_distances" (see
 * __Pyx_MODULE_NAME below).  Do not edit by hand -- change the
 * dense_distances.pyx source and regenerate with Cython instead.
 * This span is declaration boilerplate only:
 *   - tail of the double-complex helper macros and their non-macro
 *     fallback prototypes (__Pyx_c_*),
 *   - __Pyx_PyInt_As* PyObject -> C integer conversion prototypes,
 *   - runtime utility prototypes (__Pyx_ImportType, __Pyx_AddTraceback,
 *     __Pyx_InitStrings, ...),
 *   - numpy C-API type-object pointer slots and PyArray_MultiIterNew*
 *     prototypes,
 *   - this module's kernel prototypes (chisquare / euclidean), the buffer
 *     type-info record for DTYPE_t, and the interned byte tables
 *     (__pyx_k_*) backing names and error messages.
 * NOTE(review): the leading "__Pyx_c_neg(a) (-(a))" is the tail of a
 * #define whose start lies before this view; left byte-identical.
 */
__Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); 
static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, int __pyx_lineno, const char *__pyx_filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, 
PyObject *); /*proto*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/ /* Module declarations from 'cython.cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'dense_distances' */ static __pyx_t_15dense_distances_DTYPE_t __pyx_f_15dense_distances_chisquare(__pyx_t_15dense_distances_DTYPE_t, __pyx_t_15dense_distances_DTYPE_t); /*proto*/ static __pyx_t_15dense_distances_DTYPE_t __pyx_f_15dense_distances_euclidean(__pyx_t_15dense_distances_DTYPE_t, __pyx_t_15dense_distances_DTYPE_t); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_15dense_distances_DTYPE_t), 'R' }; #define __Pyx_MODULE_NAME "dense_distances" int __pyx_module_is_main_dense_distances = 0; /* Implementation of 'dense_distances' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static char __pyx_k_1[] = "Dimension mismatch"; static char __pyx_k_2[] = "Matrix dimension mismatch"; static char __pyx_k_3[] = "ndarray is not C contiguous"; static char __pyx_k_5[] = "ndarray is not Fortran contiguous"; static char __pyx_k_7[] = "Non-native byte order not supported"; static char __pyx_k_9[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_10[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_13[] = "Format string allocated too short."; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char 
__pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__m[] = "m"; static char __pyx_k__q[] = "q"; static char __pyx_k__x[] = "x"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__m1[] = "m1"; static char __pyx_k__m2[] = "m2"; static char __pyx_k__np[] = "np"; static char __pyx_k__x1[] = "x1"; static char __pyx_k__x2[] = "x2"; static char __pyx_k__DTYPE[] = "DTYPE"; static char __pyx_k__float[] = "float"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__m2m_chisquare[] = "m2m_chisquare"; static char __pyx_k__m2m_euclidean[] = "m2m_euclidean"; static char __pyx_k__v2m_chisquare[] = "v2m_chisquare"; static char __pyx_k__v2m_euclidean[] = "v2m_euclidean"; static char __pyx_k__v2v_chisquare[] = "v2v_chisquare"; static char __pyx_k__v2v_euclidean[] = "v2v_euclidean"; static char __pyx_k__gram_chisquare[] = "gram_chisquare"; static char __pyx_k__gram_euclidean[] = "gram_euclidean"; static char __pyx_k__dense_distances[] = "dense_distances"; static PyObject *__pyx_kp_s_1; static PyObject *__pyx_kp_u_10; static PyObject *__pyx_kp_u_13; static PyObject *__pyx_kp_s_2; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_9; static PyObject *__pyx_n_s__DTYPE; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s__dense_distances; static PyObject *__pyx_n_s__float; static PyObject *__pyx_n_s__gram_chisquare; static PyObject 
/*
 * Cython-generated (dense_distances.pyx) -- keep byte-identical; edit the
 * .pyx and regenerate instead.  This span contains:
 *   - the remaining interned-name PyObject* slots and cached tuples,
 *   - chisquare(x, y): per the quoted .pyx lines 15-21, returns
 *     ((x - y)^2) / (x + y) when x + y > 0, else 0.0 (nogil C kernel),
 *   - euclidean(x, y): per .pyx lines 24-25, returns (x - y)^2,
 *   - the Python wrapper v2v_euclidean(x1, x2): unpacks two arguments
 *     (positional or keyword), type-checks them as numpy ndarrays, acquires
 *     strided 1-D buffers, asserts len(x1) == len(x2) ("Dimension mismatch"),
 *     then sums euclidean(x1[i], x2[i]) over i and returns it as a Python
 *     float.  The goto __pyx_L*_error chains release the buffers and attach
 *     traceback info on failure -- statement order is load-bearing here.
 */
*__pyx_n_s__gram_euclidean; static PyObject *__pyx_n_s__m; static PyObject *__pyx_n_s__m1; static PyObject *__pyx_n_s__m2; static PyObject *__pyx_n_s__m2m_chisquare; static PyObject *__pyx_n_s__m2m_euclidean; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__v2m_chisquare; static PyObject *__pyx_n_s__v2m_euclidean; static PyObject *__pyx_n_s__v2v_chisquare; static PyObject *__pyx_n_s__v2v_euclidean; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__x1; static PyObject *__pyx_n_s__x2; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_8; static PyObject *__pyx_k_tuple_11; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_14; /* "dense_distances.pyx":15 * * * cdef DTYPE_t chisquare(DTYPE_t x, DTYPE_t y) nogil: # <<<<<<<<<<<<<< * cdef DTYPE_t d * if (x + y) > 0: */ static __pyx_t_15dense_distances_DTYPE_t __pyx_f_15dense_distances_chisquare(__pyx_t_15dense_distances_DTYPE_t __pyx_v_x, __pyx_t_15dense_distances_DTYPE_t __pyx_v_y) { __pyx_t_15dense_distances_DTYPE_t __pyx_v_d; __pyx_t_15dense_distances_DTYPE_t __pyx_r; int __pyx_t_1; /* "dense_distances.pyx":17 * cdef DTYPE_t chisquare(DTYPE_t x, DTYPE_t y) nogil: * cdef DTYPE_t d * if (x + y) > 0: # <<<<<<<<<<<<<< * d = ((x - y) * (x - y)) / (x + y) * else: */ __pyx_t_1 = ((__pyx_v_x + __pyx_v_y) > 0.0); if (__pyx_t_1) { /* "dense_distances.pyx":18 * cdef DTYPE_t d * if (x + y) > 0: * d = ((x - y) * (x - y)) / (x + y) # <<<<<<<<<<<<<< * else: * d = 0.0 */ __pyx_v_d = (((__pyx_v_x - __pyx_v_y) * (__pyx_v_x - __pyx_v_y)) / (__pyx_v_x + __pyx_v_y)); goto __pyx_L3; } /*else*/ { /* "dense_distances.pyx":20 * d = ((x - y) * (x - y)) / (x + y) * else: * d = 0.0 # <<<<<<<<<<<<<< * return d * */ __pyx_v_d = 0.0; } __pyx_L3:; /* "dense_distances.pyx":21 * else: * d = 0.0 * return d # <<<<<<<<<<<<<< * * */ __pyx_r 
= __pyx_v_d; goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "dense_distances.pyx":24 * * * cdef DTYPE_t euclidean(DTYPE_t x, DTYPE_t y) nogil: # <<<<<<<<<<<<<< * return (x - y) * (x - y) * */ static __pyx_t_15dense_distances_DTYPE_t __pyx_f_15dense_distances_euclidean(__pyx_t_15dense_distances_DTYPE_t __pyx_v_x, __pyx_t_15dense_distances_DTYPE_t __pyx_v_y) { __pyx_t_15dense_distances_DTYPE_t __pyx_r; /* "dense_distances.pyx":25 * * cdef DTYPE_t euclidean(DTYPE_t x, DTYPE_t y) nogil: * return (x - y) * (x - y) # <<<<<<<<<<<<<< * * */ __pyx_r = ((__pyx_v_x - __pyx_v_y) * (__pyx_v_x - __pyx_v_y)); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "dense_distances.pyx":28 * * * def v2v_euclidean(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): # <<<<<<<<<<<<<< * cdef int d = len(x1) * cdef int _d = len(x2) */ static PyObject *__pyx_pf_15dense_distances_v2v_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15dense_distances_v2v_euclidean = {__Pyx_NAMESTR("v2v_euclidean"), (PyCFunction)__pyx_pf_15dense_distances_v2v_euclidean, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pf_15dense_distances_v2v_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_x1 = 0; PyArrayObject *__pyx_v_x2 = 0; int __pyx_v_d; int __pyx_v__d; int __pyx_v_i; __pyx_t_15dense_distances_DTYPE_t __pyx_v_dist; Py_buffer __pyx_bstruct_x1; Py_ssize_t __pyx_bstride_0_x1 = 0; Py_ssize_t __pyx_bshape_0_x1 = 0; Py_buffer __pyx_bstruct_x2; Py_ssize_t __pyx_bstride_0_x2 = 0; Py_ssize_t __pyx_bshape_0_x2 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x1,&__pyx_n_s__x2,0}; 
__Pyx_RefNannySetupContext("v2v_euclidean"); __pyx_self = __pyx_self; { PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x1); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x2); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("v2v_euclidean", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "v2v_euclidean") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_x1 = ((PyArrayObject *)values[0]); __pyx_v_x2 = ((PyArrayObject *)values[1]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("v2v_euclidean", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("dense_distances.v2v_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_bstruct_x1.buf = NULL; __pyx_bstruct_x2.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x1), __pyx_ptype_5numpy_ndarray, 1, "x1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x2), __pyx_ptype_5numpy_ndarray, 1, "x2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x1, (PyObject*)__pyx_v_x1, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_x1 = __pyx_bstruct_x1.strides[0]; __pyx_bshape_0_x1 = __pyx_bstruct_x1.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x2, (PyObject*)__pyx_v_x2, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_x2 = __pyx_bstruct_x2.strides[0]; __pyx_bshape_0_x2 = __pyx_bstruct_x2.shape[0]; /* "dense_distances.pyx":29 * * def v2v_euclidean(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): * cdef int d = len(x1) # <<<<<<<<<<<<<< * cdef int _d = len(x2) * assert d == _d, "Dimension mismatch" */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x1)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_d = __pyx_t_1; /* "dense_distances.pyx":30 * def v2v_euclidean(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): * cdef int d = len(x1) * cdef int _d = len(x2) # <<<<<<<<<<<<<< * assert d == _d, "Dimension mismatch" * cdef int i */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x2)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v__d = __pyx_t_1; /* "dense_distances.pyx":31 * 
cdef int d = len(x1) * cdef int _d = len(x2) * assert d == _d, "Dimension mismatch" # <<<<<<<<<<<<<< * cdef int i * cdef DTYPE_t dist = 0.0 */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!(__pyx_v_d == __pyx_v__d))) { PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_1)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #endif /* "dense_distances.pyx":33 * assert d == _d, "Dimension mismatch" * cdef int i * cdef DTYPE_t dist = 0.0 # <<<<<<<<<<<<<< * for i in range(d): * dist += euclidean(x1[i], x2[i]) */ __pyx_v_dist = 0.0; /* "dense_distances.pyx":34 * cdef int i * cdef DTYPE_t dist = 0.0 * for i in range(d): # <<<<<<<<<<<<<< * dist += euclidean(x1[i], x2[i]) * return dist */ __pyx_t_2 = __pyx_v_d; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "dense_distances.pyx":35 * cdef DTYPE_t dist = 0.0 * for i in range(d): * dist += euclidean(x1[i], x2[i]) # <<<<<<<<<<<<<< * return dist * */ __pyx_t_4 = __pyx_v_i; if (__pyx_t_4 < 0) __pyx_t_4 += __pyx_bshape_0_x1; __pyx_t_5 = __pyx_v_i; if (__pyx_t_5 < 0) __pyx_t_5 += __pyx_bshape_0_x2; __pyx_v_dist = (__pyx_v_dist + __pyx_f_15dense_distances_euclidean((*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x1.buf, __pyx_t_4, __pyx_bstride_0_x1)), (*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x2.buf, __pyx_t_5, __pyx_bstride_0_x2)))); } /* "dense_distances.pyx":36 * for i in range(d): * dist += euclidean(x1[i], x2[i]) * return dist # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = PyFloat_FromDouble(__pyx_v_dist); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; 
/*
 * Cython-generated (dense_distances.pyx) -- keep byte-identical; edit the
 * .pyx and regenerate instead.  This span contains:
 *   - the error/cleanup tail of v2v_euclidean (ErrFetch/ErrRestore brackets
 *     the buffer releases so the pending exception survives them),
 *   - the wrapper v2v_chisquare(x1, x2): identical argument unpacking,
 *     ndarray type checks and 1-D buffer acquisition as v2v_euclidean,
 *     asserts len(x1) == len(x2) ("Dimension mismatch"), then sums
 *     chisquare(x1[i], x2[i]) over i and returns a Python float.
 */
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x1); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x2); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("dense_distances.v2v_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x1); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x2); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "dense_distances.pyx":39 * * * def v2v_chisquare(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): # <<<<<<<<<<<<<< * cdef int d = len(x1) * cdef int _d = len(x2) */ static PyObject *__pyx_pf_15dense_distances_1v2v_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15dense_distances_1v2v_chisquare = {__Pyx_NAMESTR("v2v_chisquare"), (PyCFunction)__pyx_pf_15dense_distances_1v2v_chisquare, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pf_15dense_distances_1v2v_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_x1 = 0; PyArrayObject *__pyx_v_x2 = 0; int __pyx_v_d; int __pyx_v__d; int __pyx_v_i; __pyx_t_15dense_distances_DTYPE_t __pyx_v_dist; Py_buffer __pyx_bstruct_x1; Py_ssize_t __pyx_bstride_0_x1 = 0; Py_ssize_t __pyx_bshape_0_x1 = 0; Py_buffer __pyx_bstruct_x2; Py_ssize_t __pyx_bstride_0_x2 = 0; Py_ssize_t __pyx_bshape_0_x2 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x1,&__pyx_n_s__x2,0}; __Pyx_RefNannySetupContext("v2v_chisquare"); __pyx_self = __pyx_self; { PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; switch 
(PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x1); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x2); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("v2v_chisquare", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "v2v_chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_x1 = ((PyArrayObject *)values[0]); __pyx_v_x2 = ((PyArrayObject *)values[1]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("v2v_chisquare", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("dense_distances.v2v_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_bstruct_x1.buf = NULL; __pyx_bstruct_x2.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x1), __pyx_ptype_5numpy_ndarray, 1, "x1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x2), __pyx_ptype_5numpy_ndarray, 1, "x2", 0))) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x1, (PyObject*)__pyx_v_x1, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_x1 = __pyx_bstruct_x1.strides[0]; __pyx_bshape_0_x1 = __pyx_bstruct_x1.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x2, (PyObject*)__pyx_v_x2, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_x2 = __pyx_bstruct_x2.strides[0]; __pyx_bshape_0_x2 = __pyx_bstruct_x2.shape[0]; /* "dense_distances.pyx":40 * * def v2v_chisquare(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): * cdef int d = len(x1) # <<<<<<<<<<<<<< * cdef int _d = len(x2) * assert d == _d, "Dimension mismatch" */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x1)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_d = __pyx_t_1; /* "dense_distances.pyx":41 * def v2v_chisquare(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): * cdef int d = len(x1) * cdef int _d = len(x2) # <<<<<<<<<<<<<< * assert d == _d, "Dimension mismatch" * cdef int i */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x2)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v__d = __pyx_t_1; /* "dense_distances.pyx":42 * cdef int d = len(x1) * cdef int _d = len(x2) * assert d == _d, "Dimension mismatch" # <<<<<<<<<<<<<< * cdef int i * cdef DTYPE_t dist = 0.0 */ 
#ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!(__pyx_v_d == __pyx_v__d))) { PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_1)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #endif /* "dense_distances.pyx":44 * assert d == _d, "Dimension mismatch" * cdef int i * cdef DTYPE_t dist = 0.0 # <<<<<<<<<<<<<< * for i in range(d): * dist += chisquare(x1[i], x2[i]) */ __pyx_v_dist = 0.0; /* "dense_distances.pyx":45 * cdef int i * cdef DTYPE_t dist = 0.0 * for i in range(d): # <<<<<<<<<<<<<< * dist += chisquare(x1[i], x2[i]) * return dist */ __pyx_t_2 = __pyx_v_d; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "dense_distances.pyx":46 * cdef DTYPE_t dist = 0.0 * for i in range(d): * dist += chisquare(x1[i], x2[i]) # <<<<<<<<<<<<<< * return dist * */ __pyx_t_4 = __pyx_v_i; if (__pyx_t_4 < 0) __pyx_t_4 += __pyx_bshape_0_x1; __pyx_t_5 = __pyx_v_i; if (__pyx_t_5 < 0) __pyx_t_5 += __pyx_bshape_0_x2; __pyx_v_dist = (__pyx_v_dist + __pyx_f_15dense_distances_chisquare((*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x1.buf, __pyx_t_4, __pyx_bstride_0_x1)), (*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x2.buf, __pyx_t_5, __pyx_bstride_0_x2)))); } /* "dense_distances.pyx":47 * for i in range(d): * dist += chisquare(x1[i], x2[i]) * return dist # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = PyFloat_FromDouble(__pyx_v_dist); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x1); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x2); 
/*
 * Cython-generated (dense_distances.pyx) -- keep byte-identical; edit the
 * .pyx and regenerate instead.  This span contains:
 *   - the error/cleanup tail of v2v_chisquare,
 *   - the wrapper v2m_euclidean(x, m): "Euclidean distances between vector
 *     x and row vectors in m" (its docstring below).  Unpacks a 1-D buffer
 *     x and a 2-D buffer m, asserts m.shape[1] == len(x)
 *     ("Matrix dimension mismatch"), allocates res = np.zeros((n,)) via the
 *     Python-level np.zeros lookup, then accumulates
 *     res[i] += euclidean(m[i, j], x[j]) over the n-by-d double loop and
 *     returns res.
 *   - the trailing generator comment is the cut-off head of v2m_chisquare,
 *     which continues past this view; left as-is.
 */
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("dense_distances.v2v_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x1); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x2); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "dense_distances.pyx":50 * * * def v2m_euclidean(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<< * """ Euclidean distances between vector x and row vectors in m * """ */ static PyObject *__pyx_pf_15dense_distances_2v2m_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_15dense_distances_2v2m_euclidean[] = " Euclidean distances between vector x and row vectors in m\n "; static PyMethodDef __pyx_mdef_15dense_distances_2v2m_euclidean = {__Pyx_NAMESTR("v2m_euclidean"), (PyCFunction)__pyx_pf_15dense_distances_2v2m_euclidean, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_15dense_distances_2v2m_euclidean)}; static PyObject *__pyx_pf_15dense_distances_2v2m_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_m = 0; int __pyx_v_d; int __pyx_v_n; int __pyx_v__d; PyArrayObject *__pyx_v_res = 0; int __pyx_v_i; int __pyx_v_j; Py_buffer __pyx_bstruct_res; Py_ssize_t __pyx_bstride_0_res = 0; Py_ssize_t __pyx_bshape_0_res = 0; Py_buffer __pyx_bstruct_m; Py_ssize_t __pyx_bstride_0_m = 0; Py_ssize_t __pyx_bstride_1_m = 0; Py_ssize_t __pyx_bshape_0_m = 0; Py_ssize_t __pyx_bshape_1_m = 0; Py_buffer __pyx_bstruct_x; Py_ssize_t __pyx_bstride_0_x = 0; Py_ssize_t __pyx_bshape_0_x = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int 
__pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__m,0}; __Pyx_RefNannySetupContext("v2m_euclidean"); __pyx_self = __pyx_self; { PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("v2m_euclidean", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "v2m_euclidean") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_x = ((PyArrayObject *)values[0]); __pyx_v_m = ((PyArrayObject *)values[1]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("v2m_euclidean", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("dense_distances.v2m_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; 
__pyx_bstruct_res.buf = NULL; __pyx_bstruct_x.buf = NULL; __pyx_bstruct_m.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m), __pyx_ptype_5numpy_ndarray, 1, "m", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_x = __pyx_bstruct_x.strides[0]; __pyx_bshape_0_x = __pyx_bstruct_x.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m, (PyObject*)__pyx_v_m, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_m = __pyx_bstruct_m.strides[0]; __pyx_bstride_1_m = __pyx_bstruct_m.strides[1]; __pyx_bshape_0_m = __pyx_bstruct_m.shape[0]; __pyx_bshape_1_m = __pyx_bstruct_m.shape[1]; /* "dense_distances.pyx":53 * """ Euclidean distances between vector x and row vectors in m * """ * cdef int d = len(x) # <<<<<<<<<<<<<< * cdef int n = m.shape[0] * cdef int _d = m.shape[1] */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_d = __pyx_t_1; /* "dense_distances.pyx":54 * """ * cdef int d = len(x) * cdef int n = m.shape[0] # <<<<<<<<<<<<<< * cdef int _d = m.shape[1] * assert _d == d, "Matrix dimension mismatch" */ __pyx_v_n = 
(__pyx_v_m->dimensions[0]); /* "dense_distances.pyx":55 * cdef int d = len(x) * cdef int n = m.shape[0] * cdef int _d = m.shape[1] # <<<<<<<<<<<<<< * assert _d == d, "Matrix dimension mismatch" * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,)) */ __pyx_v__d = (__pyx_v_m->dimensions[1]); /* "dense_distances.pyx":56 * cdef int n = m.shape[0] * cdef int _d = m.shape[1] * assert _d == d, "Matrix dimension mismatch" # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,)) * cdef int i */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!(__pyx_v__d == __pyx_v_d))) { PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_2)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #endif /* "dense_distances.pyx":57 * cdef int _d = m.shape[1] * assert _d == d, "Matrix dimension mismatch" * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,)) # <<<<<<<<<<<<<< * cdef int i * cdef int j */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; } } __pyx_t_5 = 0; __pyx_v_res = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "dense_distances.pyx":60 * cdef int i * cdef int j * for i in range(n): # <<<<<<<<<<<<<< * for j in range(d): * res[i] += euclidean(m[i, j], x[j]) */ __pyx_t_6 = __pyx_v_n; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; /* "dense_distances.pyx":61 * cdef int j * for i in range(n): * for j in range(d): # <<<<<<<<<<<<<< * res[i] += euclidean(m[i, j], x[j]) * return res */ __pyx_t_8 = __pyx_v_d; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_j = __pyx_t_9; /* "dense_distances.pyx":62 * for i in range(n): * for j in range(d): * res[i] += euclidean(m[i, j], x[j]) # <<<<<<<<<<<<<< 
* return res * */ __pyx_t_10 = __pyx_v_i; __pyx_t_11 = __pyx_v_j; if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_bshape_0_m; if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_1_m; __pyx_t_12 = __pyx_v_j; if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_0_x; __pyx_t_13 = __pyx_v_i; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_res; *__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_13, __pyx_bstride_0_res) += __pyx_f_15dense_distances_euclidean((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_10, __pyx_bstride_0_m, __pyx_t_11, __pyx_bstride_1_m)), (*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x.buf, __pyx_t_12, __pyx_bstride_0_x))); } } /* "dense_distances.pyx":63 * for j in range(d): * res[i] += euclidean(m[i, j], x[j]) * return res # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_res)); __pyx_r = ((PyObject *)__pyx_v_res); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("dense_distances.v2m_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_res); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "dense_distances.pyx":66 * * * def v2m_chisquare(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<< * """ Chisquare distances between vector x and 
row vectors in m * """ */ static PyObject *__pyx_pf_15dense_distances_3v2m_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_15dense_distances_3v2m_chisquare[] = " Chisquare distances between vector x and row vectors in m\n "; static PyMethodDef __pyx_mdef_15dense_distances_3v2m_chisquare = {__Pyx_NAMESTR("v2m_chisquare"), (PyCFunction)__pyx_pf_15dense_distances_3v2m_chisquare, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_15dense_distances_3v2m_chisquare)}; static PyObject *__pyx_pf_15dense_distances_3v2m_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_m = 0; int __pyx_v_d; int __pyx_v_n; int __pyx_v__d; PyArrayObject *__pyx_v_res = 0; int __pyx_v_i; int __pyx_v_j; Py_buffer __pyx_bstruct_res; Py_ssize_t __pyx_bstride_0_res = 0; Py_ssize_t __pyx_bshape_0_res = 0; Py_buffer __pyx_bstruct_m; Py_ssize_t __pyx_bstride_0_m = 0; Py_ssize_t __pyx_bstride_1_m = 0; Py_ssize_t __pyx_bshape_0_m = 0; Py_ssize_t __pyx_bshape_1_m = 0; Py_buffer __pyx_bstruct_x; Py_ssize_t __pyx_bstride_0_x = 0; Py_ssize_t __pyx_bshape_0_x = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__m,0}; __Pyx_RefNannySetupContext("v2m_chisquare"); __pyx_self = __pyx_self; { PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } 
kw_args = PyDict_Size(__pyx_kwds); switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("v2m_chisquare", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "v2m_chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_x = ((PyArrayObject *)values[0]); __pyx_v_m = ((PyArrayObject *)values[1]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("v2m_chisquare", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("dense_distances.v2m_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_bstruct_res.buf = NULL; __pyx_bstruct_x.buf = NULL; __pyx_bstruct_m.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m), __pyx_ptype_5numpy_ndarray, 1, "m", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_x = __pyx_bstruct_x.strides[0]; __pyx_bshape_0_x = __pyx_bstruct_x.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m, (PyObject*)__pyx_v_m, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_m = __pyx_bstruct_m.strides[0]; __pyx_bstride_1_m = __pyx_bstruct_m.strides[1]; __pyx_bshape_0_m = __pyx_bstruct_m.shape[0]; __pyx_bshape_1_m = __pyx_bstruct_m.shape[1]; /* "dense_distances.pyx":69 * """ Chisquare distances between vector x and row vectors in m * """ * cdef int d = len(x) # <<<<<<<<<<<<<< * cdef int n = m.shape[0] * cdef int _d = m.shape[1] */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_d = __pyx_t_1; /* "dense_distances.pyx":70 * """ * cdef int d = len(x) * cdef int n = m.shape[0] # <<<<<<<<<<<<<< * cdef int _d = m.shape[1] * assert _d == d, "Matrix dimension mismatch" */ __pyx_v_n = (__pyx_v_m->dimensions[0]); /* "dense_distances.pyx":71 * cdef int d = len(x) * cdef int n = m.shape[0] * cdef int _d = m.shape[1] # <<<<<<<<<<<<<< * assert _d == d, "Matrix dimension mismatch" * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,)) */ __pyx_v__d = (__pyx_v_m->dimensions[1]); /* "dense_distances.pyx":72 * cdef int n = m.shape[0] * cdef int _d = m.shape[1] * assert _d == d, "Matrix dimension mismatch" # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,)) * cdef int i */ #ifndef 
CYTHON_WITHOUT_ASSERTIONS if (unlikely(!(__pyx_v__d == __pyx_v_d))) { PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_2)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #endif /* "dense_distances.pyx":73 * cdef int _d = m.shape[1] * assert _d == d, "Matrix dimension mismatch" * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,)) # <<<<<<<<<<<<<< * cdef int i * cdef int j */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (!(likely(((__pyx_t_4) == 
Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; } } __pyx_t_5 = 0; __pyx_v_res = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "dense_distances.pyx":76 * cdef int i * cdef int j * for i in range(n): # <<<<<<<<<<<<<< * for j in range(d): * res[i] += chisquare(m[i, j], x[j]) */ __pyx_t_6 = __pyx_v_n; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; /* "dense_distances.pyx":77 * cdef int j * for i in range(n): * for j in range(d): # <<<<<<<<<<<<<< * res[i] += chisquare(m[i, j], x[j]) * return res */ __pyx_t_8 = __pyx_v_d; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_j = __pyx_t_9; /* "dense_distances.pyx":78 * for i in range(n): * for j in range(d): * res[i] += chisquare(m[i, j], x[j]) # <<<<<<<<<<<<<< * return res * */ __pyx_t_10 = __pyx_v_i; __pyx_t_11 = __pyx_v_j; if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_bshape_0_m; if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_1_m; __pyx_t_12 = __pyx_v_j; if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_0_x; __pyx_t_13 = __pyx_v_i; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_res; *__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_13, __pyx_bstride_0_res) += 
__pyx_f_15dense_distances_chisquare((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_10, __pyx_bstride_0_m, __pyx_t_11, __pyx_bstride_1_m)), (*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x.buf, __pyx_t_12, __pyx_bstride_0_x))); } } /* "dense_distances.pyx":79 * for j in range(d): * res[i] += chisquare(m[i, j], x[j]) * return res # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_res)); __pyx_r = ((PyObject *)__pyx_v_res); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("dense_distances.v2m_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_res); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "dense_distances.pyx":82 * * * def m2m_euclidean(np.ndarray[DTYPE_t, ndim=2] m1, np.ndarray[DTYPE_t, ndim=2] m2): # <<<<<<<<<<<<<< * """ Parallelized Euclidean distances between row vectors in m1 and m2 * """ */ static PyObject *__pyx_pf_15dense_distances_4m2m_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_15dense_distances_4m2m_euclidean[] = " Parallelized Euclidean distances between row vectors in m1 and m2\n "; static PyMethodDef __pyx_mdef_15dense_distances_4m2m_euclidean = {__Pyx_NAMESTR("m2m_euclidean"), 
(PyCFunction)__pyx_pf_15dense_distances_4m2m_euclidean, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_15dense_distances_4m2m_euclidean)}; static PyObject *__pyx_pf_15dense_distances_4m2m_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_m1 = 0; PyArrayObject *__pyx_v_m2 = 0; int __pyx_v_n1; int __pyx_v_d; int __pyx_v_n2; int __pyx_v__d; PyArrayObject *__pyx_v_res = 0; int __pyx_v_i; int __pyx_v_j; int __pyx_v_k; Py_buffer __pyx_bstruct_res; Py_ssize_t __pyx_bstride_0_res = 0; Py_ssize_t __pyx_bstride_1_res = 0; Py_ssize_t __pyx_bshape_0_res = 0; Py_ssize_t __pyx_bshape_1_res = 0; Py_buffer __pyx_bstruct_m1; Py_ssize_t __pyx_bstride_0_m1 = 0; Py_ssize_t __pyx_bstride_1_m1 = 0; Py_ssize_t __pyx_bshape_0_m1 = 0; Py_ssize_t __pyx_bshape_1_m1 = 0; Py_buffer __pyx_bstruct_m2; Py_ssize_t __pyx_bstride_0_m2 = 0; Py_ssize_t __pyx_bstride_1_m2 = 0; Py_ssize_t __pyx_bshape_0_m2 = 0; Py_ssize_t __pyx_bshape_1_m2 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__m1,&__pyx_n_s__m2,0}; __Pyx_RefNannySetupContext("m2m_euclidean"); __pyx_self = __pyx_self; { PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s__m1); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m2); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("m2m_euclidean", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "m2m_euclidean") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_m1 = ((PyArrayObject *)values[0]); __pyx_v_m2 = ((PyArrayObject *)values[1]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("m2m_euclidean", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("dense_distances.m2m_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_bstruct_res.buf = NULL; __pyx_bstruct_m1.buf = NULL; __pyx_bstruct_m2.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m1), __pyx_ptype_5numpy_ndarray, 1, "m1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m2), __pyx_ptype_5numpy_ndarray, 1, "m2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m1, (PyObject*)__pyx_v_m1, 
&__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_m1 = __pyx_bstruct_m1.strides[0]; __pyx_bstride_1_m1 = __pyx_bstruct_m1.strides[1]; __pyx_bshape_0_m1 = __pyx_bstruct_m1.shape[0]; __pyx_bshape_1_m1 = __pyx_bstruct_m1.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m2, (PyObject*)__pyx_v_m2, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_m2 = __pyx_bstruct_m2.strides[0]; __pyx_bstride_1_m2 = __pyx_bstruct_m2.strides[1]; __pyx_bshape_0_m2 = __pyx_bstruct_m2.shape[0]; __pyx_bshape_1_m2 = __pyx_bstruct_m2.shape[1]; /* "dense_distances.pyx":85 * """ Parallelized Euclidean distances between row vectors in m1 and m2 * """ * cdef int n1 = m1.shape[0] # <<<<<<<<<<<<<< * cdef int d = m1.shape[1] * cdef int n2 = m2.shape[0] */ __pyx_v_n1 = (__pyx_v_m1->dimensions[0]); /* "dense_distances.pyx":86 * """ * cdef int n1 = m1.shape[0] * cdef int d = m1.shape[1] # <<<<<<<<<<<<<< * cdef int n2 = m2.shape[0] * cdef int _d = m2.shape[1] */ __pyx_v_d = (__pyx_v_m1->dimensions[1]); /* "dense_distances.pyx":87 * cdef int n1 = m1.shape[0] * cdef int d = m1.shape[1] * cdef int n2 = m2.shape[0] # <<<<<<<<<<<<<< * cdef int _d = m2.shape[1] * assert _d == d, "Matrix dimension mismatch" */ __pyx_v_n2 = (__pyx_v_m2->dimensions[0]); /* "dense_distances.pyx":88 * cdef int d = m1.shape[1] * cdef int n2 = m2.shape[0] * cdef int _d = m2.shape[1] # <<<<<<<<<<<<<< * assert _d == d, "Matrix dimension mismatch" * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2)) */ __pyx_v__d = (__pyx_v_m2->dimensions[1]); /* "dense_distances.pyx":89 * cdef int n2 = m2.shape[0] * cdef int _d = m2.shape[1] * 
assert _d == d, "Matrix dimension mismatch" # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2)) * cdef int i */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!(__pyx_v__d == __pyx_v_d))) { PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_2)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #endif /* "dense_distances.pyx":90 * cdef int _d = m2.shape[1] * assert _d == d, "Matrix dimension mismatch" * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2)) # <<<<<<<<<<<<<< * cdef int i * cdef int j */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromLong(__pyx_v_n1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromLong(__pyx_v_n2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); PyTuple_SET_ITEM(__pyx_t_3, 0, 
((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bstride_1_res = __pyx_bstruct_res.strides[1]; __pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; __pyx_bshape_1_res = __pyx_bstruct_res.shape[1]; } } __pyx_t_5 = 0; __pyx_v_res = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "dense_distances.pyx":94 * cdef int j * cdef int k * for i in prange(n1, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<< * for j in range(n2): * for k in range(d): */ { #ifdef WITH_THREAD PyThreadState *_save = NULL; #endif Py_UNBLOCK_THREADS /*try:*/ { __pyx_t_6 = __pyx_v_n1; if (1 == 0) abort(); { __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { __pyx_v_i = 0; #ifdef _OPENMP #pragma omp parallel private(__pyx_t_16, __pyx_t_11, __pyx_t_9, __pyx_t_10, __pyx_t_15, __pyx_t_14, __pyx_t_18, __pyx_t_13, __pyx_t_12, __pyx_t_17) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) 
lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) schedule(runtime) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_i = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); __pyx_v_k = ((int)0xbad0bad0); /* "dense_distances.pyx":95 * cdef int k * for i in prange(n1, nogil=True, schedule='runtime'): * for j in range(n2): # <<<<<<<<<<<<<< * for k in range(d): * res[i,j] += euclidean(m1[i, k], m2[j, k]) */ __pyx_t_9 = __pyx_v_n2; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_j = __pyx_t_10; /* "dense_distances.pyx":96 * for i in prange(n1, nogil=True, schedule='runtime'): * for j in range(n2): * for k in range(d): # <<<<<<<<<<<<<< * res[i,j] += euclidean(m1[i, k], m2[j, k]) * return res */ __pyx_t_11 = __pyx_v_d; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_k = __pyx_t_12; /* "dense_distances.pyx":97 * for j in range(n2): * for k in range(d): * res[i,j] += euclidean(m1[i, k], m2[j, k]) # <<<<<<<<<<<<<< * return res * */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_k; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_m1; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_1_m1; __pyx_t_15 = __pyx_v_j; __pyx_t_16 = __pyx_v_k; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_m2; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_m2; __pyx_t_17 = __pyx_v_i; __pyx_t_18 = __pyx_v_j; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_res; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_res; *__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_17, __pyx_bstride_0_res, __pyx_t_18, __pyx_bstride_1_res) += __pyx_f_15dense_distances_euclidean((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m1.buf, __pyx_t_13, __pyx_bstride_0_m1, __pyx_t_14, __pyx_bstride_1_m1)), (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m2.buf, __pyx_t_15, 
__pyx_bstride_0_m2, __pyx_t_16, __pyx_bstride_1_m2))); } } } } } } } } /* "dense_distances.pyx":94 * cdef int j * cdef int k * for i in prange(n1, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<< * for j in range(n2): * for k in range(d): */ /*finally:*/ { Py_BLOCK_THREADS } } /* "dense_distances.pyx":98 * for k in range(d): * res[i,j] += euclidean(m1[i, k], m2[j, k]) * return res # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_res)); __pyx_r = ((PyObject *)__pyx_v_res); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m1); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m2); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("dense_distances.m2m_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m1); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m2); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_res); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "dense_distances.pyx":101 * * * def m2m_chisquare(np.ndarray[DTYPE_t, ndim=2] m1, np.ndarray[DTYPE_t, ndim=2] m2): # <<<<<<<<<<<<<< * """ Parallelized Chisquare distances between row vectors in m1 and m2 * """ */ static PyObject *__pyx_pf_15dense_distances_5m2m_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_15dense_distances_5m2m_chisquare[] = " Parallelized Chisquare distances between row vectors in m1 and m2\n "; static PyMethodDef __pyx_mdef_15dense_distances_5m2m_chisquare = {__Pyx_NAMESTR("m2m_chisquare"), 
(PyCFunction)__pyx_pf_15dense_distances_5m2m_chisquare, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_15dense_distances_5m2m_chisquare)}; static PyObject *__pyx_pf_15dense_distances_5m2m_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_m1 = 0; PyArrayObject *__pyx_v_m2 = 0; int __pyx_v_n1; int __pyx_v_d; int __pyx_v_n2; int __pyx_v__d; PyArrayObject *__pyx_v_res = 0; int __pyx_v_i; int __pyx_v_j; int __pyx_v_k; Py_buffer __pyx_bstruct_res; Py_ssize_t __pyx_bstride_0_res = 0; Py_ssize_t __pyx_bstride_1_res = 0; Py_ssize_t __pyx_bshape_0_res = 0; Py_ssize_t __pyx_bshape_1_res = 0; Py_buffer __pyx_bstruct_m1; Py_ssize_t __pyx_bstride_0_m1 = 0; Py_ssize_t __pyx_bstride_1_m1 = 0; Py_ssize_t __pyx_bshape_0_m1 = 0; Py_ssize_t __pyx_bshape_1_m1 = 0; Py_buffer __pyx_bstruct_m2; Py_ssize_t __pyx_bstride_0_m2 = 0; Py_ssize_t __pyx_bstride_1_m2 = 0; Py_ssize_t __pyx_bshape_0_m2 = 0; Py_ssize_t __pyx_bshape_1_m2 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__m1,&__pyx_n_s__m2,0}; __Pyx_RefNannySetupContext("m2m_chisquare"); __pyx_self = __pyx_self; { PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s__m1); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m2); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("m2m_chisquare", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "m2m_chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_m1 = ((PyArrayObject *)values[0]); __pyx_v_m2 = ((PyArrayObject *)values[1]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("m2m_chisquare", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("dense_distances.m2m_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_bstruct_res.buf = NULL; __pyx_bstruct_m1.buf = NULL; __pyx_bstruct_m2.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m1), __pyx_ptype_5numpy_ndarray, 1, "m1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m2), __pyx_ptype_5numpy_ndarray, 1, "m2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m1, (PyObject*)__pyx_v_m1, 
&__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_m1 = __pyx_bstruct_m1.strides[0]; __pyx_bstride_1_m1 = __pyx_bstruct_m1.strides[1]; __pyx_bshape_0_m1 = __pyx_bstruct_m1.shape[0]; __pyx_bshape_1_m1 = __pyx_bstruct_m1.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m2, (PyObject*)__pyx_v_m2, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_m2 = __pyx_bstruct_m2.strides[0]; __pyx_bstride_1_m2 = __pyx_bstruct_m2.strides[1]; __pyx_bshape_0_m2 = __pyx_bstruct_m2.shape[0]; __pyx_bshape_1_m2 = __pyx_bstruct_m2.shape[1]; /* "dense_distances.pyx":104 * """ Parallelized Chisquare distances between row vectors in m1 and m2 * """ * cdef int n1 = m1.shape[0] # <<<<<<<<<<<<<< * cdef int d = m1.shape[1] * cdef int n2 = m2.shape[0] */ __pyx_v_n1 = (__pyx_v_m1->dimensions[0]); /* "dense_distances.pyx":105 * """ * cdef int n1 = m1.shape[0] * cdef int d = m1.shape[1] # <<<<<<<<<<<<<< * cdef int n2 = m2.shape[0] * cdef int _d = m2.shape[1] */ __pyx_v_d = (__pyx_v_m1->dimensions[1]); /* "dense_distances.pyx":106 * cdef int n1 = m1.shape[0] * cdef int d = m1.shape[1] * cdef int n2 = m2.shape[0] # <<<<<<<<<<<<<< * cdef int _d = m2.shape[1] * assert _d == d, "Matrix dimension mismatch" */ __pyx_v_n2 = (__pyx_v_m2->dimensions[0]); /* "dense_distances.pyx":107 * cdef int d = m1.shape[1] * cdef int n2 = m2.shape[0] * cdef int _d = m2.shape[1] # <<<<<<<<<<<<<< * assert _d == d, "Matrix dimension mismatch" * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2)) */ __pyx_v__d = (__pyx_v_m2->dimensions[1]); /* "dense_distances.pyx":108 * cdef int n2 = m2.shape[0] * cdef int _d = 
m2.shape[1] * assert _d == d, "Matrix dimension mismatch" # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2)) * cdef int i */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!(__pyx_v__d == __pyx_v_d))) { PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_2)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #endif /* "dense_distances.pyx":109 * cdef int _d = m2.shape[1] * assert _d == d, "Matrix dimension mismatch" * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2)) # <<<<<<<<<<<<<< * cdef int i * cdef int j */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromLong(__pyx_v_n1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromLong(__pyx_v_n2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); 
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bstride_1_res = __pyx_bstruct_res.strides[1]; __pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; __pyx_bshape_1_res = __pyx_bstruct_res.shape[1]; } } __pyx_t_5 = 0; __pyx_v_res = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "dense_distances.pyx":113 * cdef int j * cdef int k * for i in prange(n1, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<< * for j in range(n2): * for k in range(d): */ { #ifdef WITH_THREAD PyThreadState *_save = NULL; #endif Py_UNBLOCK_THREADS /*try:*/ { __pyx_t_6 = __pyx_v_n1; if (1 == 0) abort(); { __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { __pyx_v_i = 0; #ifdef _OPENMP #pragma omp parallel private(__pyx_t_16, __pyx_t_11, __pyx_t_9, __pyx_t_10, __pyx_t_15, __pyx_t_14, __pyx_t_18, __pyx_t_13, __pyx_t_12, __pyx_t_17) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for 
lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) schedule(runtime) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_i = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); __pyx_v_k = ((int)0xbad0bad0); /* "dense_distances.pyx":114 * cdef int k * for i in prange(n1, nogil=True, schedule='runtime'): * for j in range(n2): # <<<<<<<<<<<<<< * for k in range(d): * res[i,j] += chisquare(m1[i, k], m2[j, k]) */ __pyx_t_9 = __pyx_v_n2; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_j = __pyx_t_10; /* "dense_distances.pyx":115 * for i in prange(n1, nogil=True, schedule='runtime'): * for j in range(n2): * for k in range(d): # <<<<<<<<<<<<<< * res[i,j] += chisquare(m1[i, k], m2[j, k]) * return res */ __pyx_t_11 = __pyx_v_d; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_k = __pyx_t_12; /* "dense_distances.pyx":116 * for j in range(n2): * for k in range(d): * res[i,j] += chisquare(m1[i, k], m2[j, k]) # <<<<<<<<<<<<<< * return res * */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_k; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_m1; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_1_m1; __pyx_t_15 = __pyx_v_j; __pyx_t_16 = __pyx_v_k; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_m2; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_m2; __pyx_t_17 = __pyx_v_i; __pyx_t_18 = __pyx_v_j; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_res; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_res; *__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_17, __pyx_bstride_0_res, __pyx_t_18, __pyx_bstride_1_res) += __pyx_f_15dense_distances_chisquare((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m1.buf, __pyx_t_13, __pyx_bstride_0_m1, __pyx_t_14, __pyx_bstride_1_m1)), (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, 
__pyx_bstruct_m2.buf, __pyx_t_15, __pyx_bstride_0_m2, __pyx_t_16, __pyx_bstride_1_m2))); } } } } } } } } /* "dense_distances.pyx":113 * cdef int j * cdef int k * for i in prange(n1, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<< * for j in range(n2): * for k in range(d): */ /*finally:*/ { Py_BLOCK_THREADS } } /* "dense_distances.pyx":117 * for k in range(d): * res[i,j] += chisquare(m1[i, k], m2[j, k]) * return res # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_res)); __pyx_r = ((PyObject *)__pyx_v_res); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m1); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m2); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("dense_distances.m2m_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m1); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m2); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_res); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "dense_distances.pyx":120 * * * def gram_euclidean(np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<< * """ Parallelized Euclidean distances between all row vectors of m * """ */ static PyObject *__pyx_pf_15dense_distances_6gram_euclidean(PyObject *__pyx_self, PyObject *__pyx_v_m); /*proto*/ static char __pyx_doc_15dense_distances_6gram_euclidean[] = " Parallelized Euclidean distances between all row vectors of m\n "; static PyMethodDef __pyx_mdef_15dense_distances_6gram_euclidean = {__Pyx_NAMESTR("gram_euclidean"), 
/* NOTE(review): Cython-GENERATED code (from dense_distances.pyx) -- do not hand-edit;
 * regenerate from the .pyx instead.  This is the wrapper + implementation of
 * gram_euclidean(m): given a 2-D numpy array m (n rows, d columns), it builds an
 * (n, n) result via np.zeros and accumulates res[i,j] += euclidean(m[i,k], m[j,k])
 * over k, for j < i, then mirrors res[j,i] = res[i,j].  The i-loop is a nogil
 * prange (OpenMP "#pragma omp for ... schedule(runtime)" below), so rows are
 * processed in parallel while the GIL is released (Py_UNBLOCK_THREADS).
 * Errors unwind via goto __pyx_L1_error; buffers are released on both the
 * success and error paths before returning. */
(PyCFunction)__pyx_pf_15dense_distances_6gram_euclidean, METH_O, __Pyx_DOCSTR(__pyx_doc_15dense_distances_6gram_euclidean)}; static PyObject *__pyx_pf_15dense_distances_6gram_euclidean(PyObject *__pyx_self, PyObject *__pyx_v_m) { int __pyx_v_n; int __pyx_v_d; PyArrayObject *__pyx_v_res = 0; int __pyx_v_i; int __pyx_v_j; int __pyx_v_k; Py_buffer __pyx_bstruct_res; Py_ssize_t __pyx_bstride_0_res = 0; Py_ssize_t __pyx_bstride_1_res = 0; Py_ssize_t __pyx_bshape_0_res = 0; Py_ssize_t __pyx_bshape_1_res = 0; Py_buffer __pyx_bstruct_m; Py_ssize_t __pyx_bstride_0_m = 0; Py_ssize_t __pyx_bstride_1_m = 0; Py_ssize_t __pyx_bshape_0_m = 0; Py_ssize_t __pyx_bshape_1_m = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_t_19; int __pyx_t_20; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gram_euclidean"); __pyx_self = __pyx_self; /* self-assignment: Cython idiom, presumably to silence an unused-parameter warning */ __pyx_bstruct_res.buf = NULL; __pyx_bstruct_m.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m), __pyx_ptype_5numpy_ndarray, 1, "m", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; /* acquire the PEP-3118 buffer for m: 2-D, strided, DTYPE_t elements */ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m, (PyObject*)__pyx_v_m, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_m = __pyx_bstruct_m.strides[0]; __pyx_bstride_1_m = __pyx_bstruct_m.strides[1]; __pyx_bshape_0_m = __pyx_bstruct_m.shape[0]; __pyx_bshape_1_m = 
__pyx_bstruct_m.shape[1]; /* "dense_distances.pyx":123 * """ Parallelized Euclidean distances between all row vectors of m * """ * cdef int n = m.shape[0] # <<<<<<<<<<<<<< * cdef int d = m.shape[1] * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n)) */ __pyx_v_n = (((PyArrayObject *)__pyx_v_m)->dimensions[0]); /* "dense_distances.pyx":124 * """ * cdef int n = m.shape[0] * cdef int d = m.shape[1] # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n)) * cdef int i */ __pyx_v_d = (((PyArrayObject *)__pyx_v_m)->dimensions[1]); /* "dense_distances.pyx":125 * cdef int n = m.shape[0] * cdef int d = m.shape[1] * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n)) # <<<<<<<<<<<<<< * cdef int i * cdef int j */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; /* call np.zeros((n, n)) to allocate the result matrix */ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bstride_1_res = __pyx_bstruct_res.strides[1]; __pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; __pyx_bshape_1_res = __pyx_bstruct_res.shape[1]; } } __pyx_t_5 = 0; __pyx_v_res = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "dense_distances.pyx":129 * cdef int j * cdef int k * for i in prange(n, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<< * for j in range(i): * for k in range(d): */ /* GIL released for the whole parallel section; restored by Py_BLOCK_THREADS below */ { #ifdef WITH_THREAD PyThreadState *_save = NULL; #endif Py_UNBLOCK_THREADS /*try:*/ { __pyx_t_6 = __pyx_v_n; if (1 == 0) abort(); { __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { __pyx_v_i = 0; #ifdef _OPENMP #pragma omp parallel private(__pyx_t_16, __pyx_t_11, __pyx_t_9, __pyx_t_10, __pyx_t_15, 
__pyx_t_14, __pyx_t_19, __pyx_t_18, __pyx_t_13, __pyx_t_12, __pyx_t_17, __pyx_t_20) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) schedule(runtime) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_i = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); __pyx_v_k = ((int)0xbad0bad0); /* "dense_distances.pyx":130 * cdef int k * for i in prange(n, nogil=True, schedule='runtime'): * for j in range(i): # <<<<<<<<<<<<<< * for k in range(d): * res[i,j] += euclidean(m[i, k], m[j, k]) */ __pyx_t_9 = __pyx_v_i; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_j = __pyx_t_10; /* "dense_distances.pyx":131 * for i in prange(n, nogil=True, schedule='runtime'): * for j in range(i): * for k in range(d): # <<<<<<<<<<<<<< * res[i,j] += euclidean(m[i, k], m[j, k]) * res[j,i] = res[i,j] */ __pyx_t_11 = __pyx_v_d; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_k = __pyx_t_12; /* "dense_distances.pyx":132 * for j in range(i): * for k in range(d): * res[i,j] += euclidean(m[i, k], m[j, k]) # <<<<<<<<<<<<<< * res[j,i] = res[i,j] * return res */ /* negative-index wraparound below is generated for each buffer access */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_k; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_m; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_1_m; __pyx_t_15 = __pyx_v_j; __pyx_t_16 = __pyx_v_k; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_m; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_m; __pyx_t_17 = __pyx_v_i; __pyx_t_18 = __pyx_v_j; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_res; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_res; *__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_17, __pyx_bstride_0_res, __pyx_t_18, __pyx_bstride_1_res) += __pyx_f_15dense_distances_euclidean((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, 
__pyx_bstruct_m.buf, __pyx_t_13, __pyx_bstride_0_m, __pyx_t_14, __pyx_bstride_1_m)), (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_15, __pyx_bstride_0_m, __pyx_t_16, __pyx_bstride_1_m))); } /* "dense_distances.pyx":133 * for k in range(d): * res[i,j] += euclidean(m[i, k], m[j, k]) * res[j,i] = res[i,j] # <<<<<<<<<<<<<< * return res * */ __pyx_t_11 = __pyx_v_i; __pyx_t_12 = __pyx_v_j; if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_0_res; if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_1_res; __pyx_t_19 = __pyx_v_j; __pyx_t_20 = __pyx_v_i; if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_bshape_0_res; if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_bshape_1_res; *__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_19, __pyx_bstride_0_res, __pyx_t_20, __pyx_bstride_1_res) = (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_11, __pyx_bstride_0_res, __pyx_t_12, __pyx_bstride_1_res)); } } } } } } } /* "dense_distances.pyx":129 * cdef int j * cdef int k * for i in prange(n, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<< * for j in range(i): * for k in range(d): */ /*finally:*/ { Py_BLOCK_THREADS } } /* "dense_distances.pyx":134 * res[i,j] += euclidean(m[i, k], m[j, k]) * res[j,i] = res[i,j] * return res # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_res)); __pyx_r = ((PyObject *)__pyx_v_res); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; /* preserve the pending exception across buffer release */ __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("dense_distances.gram_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 
NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_res); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "dense_distances.pyx":137 * * * def gram_chisquare(np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<< * """ Parallelized Chisquare distances between all row vectors of m * """ */
/* NOTE(review): Cython-GENERATED code (from dense_distances.pyx) -- do not hand-edit;
 * regenerate from the .pyx instead.  Wrapper + implementation of
 * gram_chisquare(m): structurally identical to gram_euclidean above, but
 * accumulates res[i,j] += chisquare(m[i,k], m[j,k]) over k for j < i and then
 * mirrors res[j,i] = res[i,j].  The i-loop is a nogil prange (OpenMP), errors
 * unwind via goto __pyx_L1_error, and both Py_buffer views are released on
 * every exit path. */
static PyObject *__pyx_pf_15dense_distances_7gram_chisquare(PyObject *__pyx_self, PyObject *__pyx_v_m); /*proto*/ static char __pyx_doc_15dense_distances_7gram_chisquare[] = " Parallelized Chisquare distances between all row vectors of m\n "; static PyMethodDef __pyx_mdef_15dense_distances_7gram_chisquare = {__Pyx_NAMESTR("gram_chisquare"), (PyCFunction)__pyx_pf_15dense_distances_7gram_chisquare, METH_O, __Pyx_DOCSTR(__pyx_doc_15dense_distances_7gram_chisquare)}; static PyObject *__pyx_pf_15dense_distances_7gram_chisquare(PyObject *__pyx_self, PyObject *__pyx_v_m) { int __pyx_v_n; int __pyx_v_d; PyArrayObject *__pyx_v_res = 0; int __pyx_v_i; int __pyx_v_j; int __pyx_v_k; Py_buffer __pyx_bstruct_res; Py_ssize_t __pyx_bstride_0_res = 0; Py_ssize_t __pyx_bstride_1_res = 0; Py_ssize_t __pyx_bshape_0_res = 0; Py_ssize_t __pyx_bshape_1_res = 0; Py_buffer __pyx_bstruct_m; Py_ssize_t __pyx_bstride_0_m = 0; Py_ssize_t __pyx_bstride_1_m = 0; Py_ssize_t __pyx_bshape_0_m = 0; Py_ssize_t __pyx_bshape_1_m = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_t_19; int __pyx_t_20; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gram_chisquare"); 
__pyx_self = __pyx_self; /* self-assignment: Cython idiom, presumably to silence an unused-parameter warning */ __pyx_bstruct_res.buf = NULL; __pyx_bstruct_m.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m), __pyx_ptype_5numpy_ndarray, 1, "m", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; /* acquire the PEP-3118 buffer for m: 2-D, strided, DTYPE_t elements */ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m, (PyObject*)__pyx_v_m, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_m = __pyx_bstruct_m.strides[0]; __pyx_bstride_1_m = __pyx_bstruct_m.strides[1]; __pyx_bshape_0_m = __pyx_bstruct_m.shape[0]; __pyx_bshape_1_m = __pyx_bstruct_m.shape[1]; /* "dense_distances.pyx":140 * """ Parallelized Chisquare distances between all row vectors of m * """ * cdef int n = m.shape[0] # <<<<<<<<<<<<<< * cdef int d = m.shape[1] * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n)) */ __pyx_v_n = (((PyArrayObject *)__pyx_v_m)->dimensions[0]); /* "dense_distances.pyx":141 * """ * cdef int n = m.shape[0] * cdef int d = m.shape[1] # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n)) * cdef int i */ __pyx_v_d = (((PyArrayObject *)__pyx_v_m)->dimensions[1]); /* "dense_distances.pyx":142 * cdef int n = m.shape[0] * cdef int d = m.shape[1] * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n)) # <<<<<<<<<<<<<< * cdef int i * cdef int j */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromLong(__pyx_v_n); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; /* call np.zeros((n, n)) to allocate the result matrix */ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } 
else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bstride_1_res = __pyx_bstruct_res.strides[1]; __pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; __pyx_bshape_1_res = __pyx_bstruct_res.shape[1]; } } __pyx_t_5 = 0; __pyx_v_res = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "dense_distances.pyx":146 * cdef int j * cdef int k * for i in prange(n, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<< * for j in range(i): * for k in range(d): */ /* GIL released for the whole parallel section; restored by Py_BLOCK_THREADS below */ { #ifdef WITH_THREAD PyThreadState *_save = NULL; #endif Py_UNBLOCK_THREADS /*try:*/ { __pyx_t_6 = __pyx_v_n; if (1 == 0) abort(); { __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { __pyx_v_i = 0; #ifdef _OPENMP #pragma omp parallel private(__pyx_t_16, __pyx_t_11, __pyx_t_9, __pyx_t_10, __pyx_t_15, __pyx_t_14, __pyx_t_19, __pyx_t_18, __pyx_t_13, __pyx_t_12, __pyx_t_17, __pyx_t_20) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) schedule(runtime) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_i = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); __pyx_v_k = ((int)0xbad0bad0); /* "dense_distances.pyx":147 * cdef int k * for i in prange(n, nogil=True, schedule='runtime'): * for j in range(i): # <<<<<<<<<<<<<< * for k in range(d): * res[i,j] += chisquare(m[i, k], m[j, k]) */ __pyx_t_9 = __pyx_v_i; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_j = __pyx_t_10; /* "dense_distances.pyx":148 * for i in prange(n, nogil=True, schedule='runtime'): * for j in range(i): * for k in range(d): # <<<<<<<<<<<<<< * res[i,j] += chisquare(m[i, k], m[j, k]) * res[j,i] = res[i,j] */ __pyx_t_11 = __pyx_v_d; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_k = __pyx_t_12; /* "dense_distances.pyx":149 * for j in range(i): * for k in range(d): * res[i,j] += chisquare(m[i, k], m[j, k]) # <<<<<<<<<<<<<< * 
res[j,i] = res[i,j] * return res */ /* negative-index wraparound below is generated for each buffer access */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_k; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_m; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_1_m; __pyx_t_15 = __pyx_v_j; __pyx_t_16 = __pyx_v_k; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_m; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_m; __pyx_t_17 = __pyx_v_i; __pyx_t_18 = __pyx_v_j; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_res; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_res; *__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_17, __pyx_bstride_0_res, __pyx_t_18, __pyx_bstride_1_res) += __pyx_f_15dense_distances_chisquare((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_13, __pyx_bstride_0_m, __pyx_t_14, __pyx_bstride_1_m)), (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_15, __pyx_bstride_0_m, __pyx_t_16, __pyx_bstride_1_m))); } /* "dense_distances.pyx":150 * for k in range(d): * res[i,j] += chisquare(m[i, k], m[j, k]) * res[j,i] = res[i,j] # <<<<<<<<<<<<<< * return res */ __pyx_t_11 = __pyx_v_i; __pyx_t_12 = __pyx_v_j; if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_0_res; if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_1_res; __pyx_t_19 = __pyx_v_j; __pyx_t_20 = __pyx_v_i; if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_bshape_0_res; if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_bshape_1_res; *__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_19, __pyx_bstride_0_res, __pyx_t_20, __pyx_bstride_1_res) = (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_11, __pyx_bstride_0_res, __pyx_t_12, __pyx_bstride_1_res)); } } } } } } } /* "dense_distances.pyx":146 * cdef int j * cdef int k * for i in prange(n, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<< * for j in range(i): * for k in range(d): */ /*finally:*/ { Py_BLOCK_THREADS } } /* "dense_distances.pyx":151 * 
res[i,j] += chisquare(m[i, k], m[j, k]) * res[j,i] = res[i,j] * return res # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_res)); __pyx_r = ((PyObject *)__pyx_v_res); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; /* preserve the pending exception across buffer release */ __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("dense_distances.gram_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_res); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":190 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__"); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":196 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":199 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":200 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":202 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(((PyArrayObject *)__pyx_v_self)); /* "numpy.pxd":204 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * 
copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":205 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L6; } /*else*/ { /* "numpy.pxd":207 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L6:; /* "numpy.pxd":209 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":210 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":211 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":213 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS 
== pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); /* "numpy.pxd":218 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":219 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ if (__pyx_v_copy_shape) { /* "numpy.pxd":222 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":223 * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":224 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":225 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); /* "numpy.pxd":226 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); } goto __pyx_L9; } /*else*/ { /* "numpy.pxd":228 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = 
((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); /* "numpy.pxd":229 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); } __pyx_L9:; /* "numpy.pxd":230 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":231 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); /* "numpy.pxd":232 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); /* "numpy.pxd":235 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":236 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); __pyx_v_descr = ((PyArrayObject *)__pyx_v_self)->descr; /* "numpy.pxd":240 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":242 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = 
(!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":244 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L12; } /*else*/ { /* "numpy.pxd":247 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = __pyx_v_self; } __pyx_L12:; /* "numpy.pxd":249 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == '>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":250 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == '>' and little_endian) or * (descr.byteorder == '<' and not little_endian)): */ __pyx_v_t = __pyx_v_descr->type_num; /* "numpy.pxd":251 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":252 * t = descr.type_num * if ((descr.byteorder == '>' and little_endian) or * (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } 
__pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":253 * if ((descr.byteorder == '>' and little_endian) or * (descr.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L14; } __pyx_L14:; /* "numpy.pxd":254 * (descr.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L15; } /* "numpy.pxd":255 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L15; } /* "numpy.pxd":256 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L15; } /* "numpy.pxd":257 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto 
__pyx_L15; } /* "numpy.pxd":258 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L15; } /* "numpy.pxd":259 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L15; } /* "numpy.pxd":260 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L15; } /* "numpy.pxd":261 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L15; } /* "numpy.pxd":262 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L15; } /* "numpy.pxd":263 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L15; } /* "numpy.pxd":264 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = 
(__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L15; } /* "numpy.pxd":265 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L15; } /* "numpy.pxd":266 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L15; } /* "numpy.pxd":267 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L15; } /* "numpy.pxd":268 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L15; } /* "numpy.pxd":269 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L15; } /* "numpy.pxd":270 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L15; } /*else*/ { /* "numpy.pxd":272 * elif t == NPY_OBJECT: f = "O" * else: * raise 
ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L15:; /* "numpy.pxd":273 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":274 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = '^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = 
((char *)malloc(255)); /* "numpy.pxd":277 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":278 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = '^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":281 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = 0 # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":282 * info.format + _buffer_format_string_len, * &offset) * f[0] = 0 # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = 0; } __pyx_L13:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":284 * f[0] = 0 # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* 
info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* __releasebuffer__: counterpart of __getbuffer__ above. Frees the heap-allocated format string when the dtype has fields, and frees the strides/shape block only when it was copied (i.e. when sizeof(npy_intp) != sizeof(Py_ssize_t)). NOTE(review): Cython-generated from numpy.pxd — do not edit by hand; regenerate instead. */ static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__"); /* "numpy.pxd":285 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); if (__pyx_t_1) { /* "numpy.pxd":286 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":287 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":288 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ /* info.shape points into the same malloc'd block as info.strides, so one free releases both. */ free(__pyx_v_info->strides); goto __pyx_L6; } __pyx_L6:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":764 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* PyArray_MultiIterNew1: thin wrapper returning a new NumPy broadcasting multi-iterator over one object; returns 0 with a Python exception set on failure. */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int
__pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); /* "numpy.pxd":765 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* The Py_None fall-through below is the implicit-return path Cython always emits; it is unreachable here because both branches above jump away. */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":767 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* PyArray_MultiIterNew2: broadcasting multi-iterator over two objects; same error convention as MultiIterNew1. */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); /* "numpy.pxd":768 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2",
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":770 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* PyArray_MultiIterNew3: broadcasting multi-iterator over three objects; returns 0 with a Python exception set on failure. */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); /* "numpy.pxd":771 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":773 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* PyArray_MultiIterNew4: broadcasting multi-iterator over four objects. */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject
*__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); /* "numpy.pxd":774 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":776 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* PyArray_MultiIterNew5: broadcasting multi-iterator over five objects; same error convention as the smaller-arity wrappers above. */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); /* "numpy.pxd":777 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 =
PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":779 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* _util_dtypestring: writes the PEP-3118 format string for dtype `descr` into the buffer [f, end), advancing *offset; returns the new write position, or NULL with a Python exception set. Cython-generated from numpy.pxd — do not edit by hand. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; char *__pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring"); /* "numpy.pxd":786 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":787 * cdef
tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":790 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":791 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":792 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { if 
(PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); } else { __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":794 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { /* "numpy.pxd":795 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == '>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":797 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_6 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_6) { __pyx_t_7 = __pyx_v_little_endian; } else { __pyx_t_7 = __pyx_t_6; } if (!__pyx_t_7) { /* "numpy.pxd":798 * * if ((child.byteorder == '>' and little_endian) or * (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_6 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_6) 
{ __pyx_t_8 = (!__pyx_v_little_endian); __pyx_t_9 = __pyx_t_8; } else { __pyx_t_9 = __pyx_t_6; } __pyx_t_6 = __pyx_t_9; } else { __pyx_t_6 = __pyx_t_7; } if (__pyx_t_6) { /* "numpy.pxd":799 * if ((child.byteorder == '>' and little_endian) or * (child.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":809 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!__pyx_t_6) break; /* "numpy.pxd":810 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":811 * while offset[0] < 
new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":812 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); } /* "numpy.pxd":814 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); /* "numpy.pxd":816 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_6) { /* "numpy.pxd":817 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":818 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_6) { /* "numpy.pxd":819 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L10; } __pyx_L10:; /* "numpy.pxd":822 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L11; } /* "numpy.pxd":823 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L11; } /* 
"numpy.pxd":824 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L11; } /* "numpy.pxd":825 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L11; } /* "numpy.pxd":826 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 
105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 105; goto __pyx_L11; } /* "numpy.pxd":827 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L11; } /* "numpy.pxd":828 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = 
PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L11; } /* "numpy.pxd":829 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L11; } /* "numpy.pxd":830 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L11; } /* "numpy.pxd":831 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L11; } /* "numpy.pxd":832 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L11; } /* "numpy.pxd":833 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L11; } /* "numpy.pxd":834 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L11; } /* "numpy.pxd":835 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "numpy.pxd":836 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "numpy.pxd":837 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "numpy.pxd":838 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":840 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L11:; /* "numpy.pxd":841 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores 
struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L9; } /*else*/ { /* "numpy.pxd":845 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_11; } __pyx_L9:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":846 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): the lines above are the tail (success path, error labels and
 * cleanup) of the Cython-generated _util_dtypestring helper, whose body begins
 * earlier in this file. */
/* "numpy.pxd":961 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */
/* Cython-generated from numpy.pxd set_array_base(arr, base): installs `base`
 * as arr->base.  When base is Py_None the stored pointer is NULL; otherwise
 * base is Py_INCREF'd *before* the old arr->base is Py_XDECREF'd (as the
 * generated comment notes, the order matters: it keeps a base that is being
 * re-installed from being freed prematurely). */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base"); /* "numpy.pxd":963 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":964 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref 
below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":966 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":967 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":968 * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":969 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); }
/* "numpy.pxd":971 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */
/* Cython-generated from numpy.pxd get_array_base(arr): returns a new
 * reference to arr->base, or Py_None when arr->base is NULL. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base"); /* "numpy.pxd":972 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":973 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":975 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = 
Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Module method table: terminator entry only — no methods are registered
 * through this table. */
static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("dense_distances"), 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif
/* Interned-string table: each entry pairs a cached Python string slot
 * (__pyx_kp_* / __pyx_n_*) with its C character data (__pyx_k_*); the table
 * is materialized by the __Pyx_InitStrings call in __Pyx_InitGlobals. */
static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, {&__pyx_kp_u_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 1, 0, 0}, {&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 0}, {&__pyx_kp_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 0}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, {&__pyx_n_s__DTYPE, __pyx_k__DTYPE, sizeof(__pyx_k__DTYPE), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s__dense_distances, __pyx_k__dense_distances, sizeof(__pyx_k__dense_distances), 0, 0, 1, 1}, {&__pyx_n_s__float, __pyx_k__float, sizeof(__pyx_k__float), 0, 0, 1, 1}, {&__pyx_n_s__gram_chisquare, __pyx_k__gram_chisquare, sizeof(__pyx_k__gram_chisquare), 0, 0, 1, 1}, {&__pyx_n_s__gram_euclidean, __pyx_k__gram_euclidean, sizeof(__pyx_k__gram_euclidean), 0, 0, 1, 1}, {&__pyx_n_s__m, __pyx_k__m, sizeof(__pyx_k__m), 0, 0, 1, 1}, {&__pyx_n_s__m1, __pyx_k__m1, sizeof(__pyx_k__m1), 0, 0, 1, 1}, {&__pyx_n_s__m2, __pyx_k__m2, sizeof(__pyx_k__m2), 0, 0, 1, 1}, {&__pyx_n_s__m2m_chisquare, 
__pyx_k__m2m_chisquare, sizeof(__pyx_k__m2m_chisquare), 0, 0, 1, 1}, {&__pyx_n_s__m2m_euclidean, __pyx_k__m2m_euclidean, sizeof(__pyx_k__m2m_euclidean), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__v2m_chisquare, __pyx_k__v2m_chisquare, sizeof(__pyx_k__v2m_chisquare), 0, 0, 1, 1}, {&__pyx_n_s__v2m_euclidean, __pyx_k__v2m_euclidean, sizeof(__pyx_k__v2m_euclidean), 0, 0, 1, 1}, {&__pyx_n_s__v2v_chisquare, __pyx_k__v2v_chisquare, sizeof(__pyx_k__v2v_chisquare), 0, 0, 1, 1}, {&__pyx_n_s__v2v_euclidean, __pyx_k__v2v_euclidean, sizeof(__pyx_k__v2v_euclidean), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__x1, __pyx_k__x1, sizeof(__pyx_k__x1), 0, 0, 1, 1}, {&__pyx_n_s__x2, __pyx_k__x2, sizeof(__pyx_k__x2), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} };
/* Caches references to the builtins range, ValueError and RuntimeError in
 * module-level globals; returns 0 on success, -1 on failure. */
static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; }
/* Pre-builds the cached one-element argument tuples (__pyx_k_tuple_*) used
 * when raising ValueError/RuntimeError; returns 0 on success, -1 on failure. */
static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); /* "numpy.pxd":211 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, 
NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */
/* Each __pyx_k_tuple_N below is a cached 1-tuple wrapping one interned
 * error-message string; PyTuple_SET_ITEM steals the reference taken by the
 * preceding __Pyx_INCREF. */
__pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4)); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6)); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":253 * if ((descr.byteorder == '>' and little_endian) or * (descr.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_8)); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); /* "numpy.pxd":795 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string 
allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == '>' and little_endian) or */ __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_11)); __Pyx_INCREF(((PyObject *)__pyx_kp_u_10)); PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); /* "numpy.pxd":799 * if ((child.byteorder == '>' and little_endian) or * (child.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_12)); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "numpy.pxd":819 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_14)); __Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; }
/* __Pyx_InitGlobals (below): seeds __PYX_NAN via memset, initializes Python
 * threading, interns the string table and caches the small int 15; returns 0
 * on success, -1 on error. */
static
int __Pyx_InitGlobals(void) { /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. */ memset(&__PYX_NAN, 0xFF, sizeof(__PYX_NAN)); PyEval_InitThreads(); if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initdense_distances(void); /*proto*/ PyMODINIT_FUNC initdense_distances(void) #else PyMODINIT_FUNC PyInit_dense_distances(void); /*proto*/ PyMODINIT_FUNC PyInit_dense_distances(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_dense_distances(void)"); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __pyx_binding_PyCFunctionType_USED if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads 
initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("dense_distances"), __pyx_methods, 0, 0, PYTHON_API_VERSION); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; #if PY_MAJOR_VERSION < 3 Py_INCREF(__pyx_m); #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_dense_distances) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "dense_distances.pyx":4 * #cython: boundscheck=False * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * cimport cython */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "dense_distances.pyx":10 * * # data type of numpy arrays (double precision) * DTYPE = np.float # <<<<<<<<<<<<<< * # corresponding compile-time type * ctypedef np.float_t DTYPE_t */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__float); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyObject_SetAttr(__pyx_m, __pyx_n_s__DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":28 * * * def v2v_euclidean(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): # <<<<<<<<<<<<<< * cdef int d = len(x1) * cdef int _d = len(x2) */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_v2v_euclidean, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__v2v_euclidean, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":39 * * * def v2v_chisquare(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): # <<<<<<<<<<<<<< * cdef int d = len(x1) * cdef int _d = len(x2) */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_1v2v_chisquare, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__v2v_chisquare, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":50 * * * def v2m_euclidean(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<< * """ Euclidean distances between vector x and row vectors in m * """ */ __pyx_t_2 = 
PyCFunction_NewEx(&__pyx_mdef_15dense_distances_2v2m_euclidean, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__v2m_euclidean, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":66 * * * def v2m_chisquare(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<< * """ Chisquare distances between vector x and row vectors in m * """ */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_3v2m_chisquare, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__v2m_chisquare, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":82 * * * def m2m_euclidean(np.ndarray[DTYPE_t, ndim=2] m1, np.ndarray[DTYPE_t, ndim=2] m2): # <<<<<<<<<<<<<< * """ Parallelized Euclidean distances between row vectors in m1 and m2 * """ */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_4m2m_euclidean, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__m2m_euclidean, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":101 * * * def m2m_chisquare(np.ndarray[DTYPE_t, ndim=2] m1, np.ndarray[DTYPE_t, ndim=2] m2): # <<<<<<<<<<<<<< * """ Parallelized Chisquare distances between row 
vectors in m1 and m2 * """ */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_5m2m_chisquare, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__m2m_chisquare, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":120 * * * def gram_euclidean(np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<< * """ Parallelized Euclidean distances between all row vectors of m * """ */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_6gram_euclidean, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gram_euclidean, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":137 * * * def gram_chisquare(np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<< * """ Parallelized Chisquare distances between all row vectors of m * """ */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_7gram_chisquare, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gram_chisquare, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "dense_distances.pyx":1 * #cython: cdivision=True # <<<<<<<<<<<<<< * #cython: boundscheck=False * */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; /* "numpy.pxd":971 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init dense_distances", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init dense_distances"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = 
"exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AS_STRING(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; } else { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { #else if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { #endif goto invalid_keyword_type; } else { for (name = first_kw_arg; *name; name++) { #if PY_MAJOR_VERSION >= 3 if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && PyUnicode_Compare(**name, key) == 0) break; #else if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && _PyString_Eq(**name, key)) break; #endif } if (*name) { values[name-argnames] = value; } else { /* unexpected keyword found */ for (name=argnames; name != first_kw_arg; name++) { if (**name == key) goto arg_passed_twice; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; #else if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && _PyString_Eq(**name, key)) goto arg_passed_twice; #endif } if (kwds2) { if 
(unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } } } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, **name); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; int is_complex; char enc_type; char new_packmode; char enc_packmode; } __Pyx_BufFmt_Context; static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } 
/* Parse a decimal repeat count at the front of a struct-format string.
 * On success, advances *ts past the digits and returns the count;
 * returns -1 (leaving *ts untouched) if *ts does not start with a digit. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        /* Bug fix: the upper bound must be inclusive -- '9' is a digit.
         * The original condition `*t < '9'` stopped multi-digit parsing at
         * any '9' (e.g. "199" parsed as 1, leaving *ts on the '9'), which
         * breaks buffer format strings with such repeat counts.  Later
         * Cython releases ship the corrected `*t <= '9'`. */
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}

/* Raise a Python ValueError for an unrecognized struct-format character. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}

/* Human-readable name of a struct-format type code, for error messages. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    switch (ch) {
    case 'b': return "'char'";
    case 'B': return "'unsigned char'";
    case 'h': return "'short'";
    case 'H': return "'unsigned short'";
    case 'i': return "'int'";
    case 'I': return "'unsigned int'";
    case 'l': return "'long'";
    case 'L': return "'unsigned long'";
    case 'q': return "'long long'";
    case 'Q': return "'unsigned long long'";
    case 'f': return (is_complex ? "'complex float'" : "'float'");
    case 'd': return (is_complex ? "'complex double'" : "'double'");
    case 'g': return (is_complex ? "'complex long double'" : "'long double'");
    case 'T': return "a struct";
    case 'O': return "Python object";
    case 'P': return "a pointer";
    case 0: return "end";
    default: return "unparseable format string";
    }
}

/* Size in bytes of a type code under standard ('=', '<', '>') packing,
 * matching the Python struct module's standard sizes. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': return 1;
    case 'h': case 'H': return 2;
    case 'i': case 'I': case 'l': case 'L': return 4;
    case 'q': case 'Q': return 8;
    case 'f': return (is_complex ? 8 : 4);
    case 'd': return (is_complex ?
16 : 8); /* 'd': complex double = 16 bytes standard, plain double = 8 */
    /* 'g': the struct module defines no standard size for long double */
    case 'g': {
        PyErr_SetString(PyExc_ValueError,
                        "Python does not define a standard format string size for long double ('g')..");
        return 0;
    }
    case 'O': case 'P': return sizeof(void*);
    default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0;
    }
}

/* Size in bytes of a struct-format type code under native ('@'/'^')
 * packing, i.e. the host compiler's sizeof for the corresponding C type. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
    case 'c': case 'b': case 'B': return 1;
    case 'h': case 'H': return sizeof(short);
    case 'i': case 'I': return sizeof(int);
    case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
    /* complex variants occupy two scalars of the base floating type */
    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
    case 'O': case 'P': return sizeof(void*);
    default: {
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
    }
}

/* Alignment-probe structs: sizeof(__Pyx_st_X) - sizeof(X) yields the host
 * compiler's alignment requirement for type X (classic char-padding trick). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif

/* Native alignment in bytes of the C type behind a format code, derived
 * from the probe structs above.  (is_complex is unused here: a complex
 * value aligns like its scalar component.) */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': return 1;
    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_st_void_p) -
sizeof(void*); /* 'P'/'O': align like a pointer */
    default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0;
    }
}

/* Classify a format code into a type group used for dtype-compatibility
 * checks: 'I' = signed integer, 'U' = unsigned integer, 'R' = real float,
 * 'C' = complex, 'O' = Python object, 'P' = pointer.  Raises and returns
 * 0 for an unknown code. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    switch (ch) {
    case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I';
    case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U';
    case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R');
    case 'O': return 'O';
    case 'P': return 'P';
    default: {
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
    }
}

/* Raise a ValueError describing a mismatch between the buffer's format
 * string and the expected __Pyx_TypeInfo; when the mismatch occurs inside
 * a nested struct, name the parent type and the offending field. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
    if (ctx->head == NULL || ctx->head->field == &ctx->root) {
        const char* expected;
        const char* quote;
        if (ctx->head == NULL) {
            /* past the end of the expected type: nothing more was expected */
            expected = "end";
            quote = "";
        } else {
            expected = ctx->head->field->type->name;
            quote = "'";
        }
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected %s%s%s but got %s",
                     quote, expected, quote,
                     __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
    } else {
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_StructField* parent = (ctx->head - 1)->field;
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                     field->type->name,
                     __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                     parent->type->name, field->name);
    }
}

/* Validate the pooled run of identical format characters accumulated in
 * ctx (enc_type repeated enc_count times) against the expected type
 * layout, advancing fmt_offset and the struct-field stack.  Returns 0 on
 * success, -1 (with a Python exception set) on mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
    char group;
    size_t size, offset;
    if (ctx->enc_type == 0) return 0;
    group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
    do {
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_TypeInfo* type = field->type;
        if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
            size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
        } else {
            size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
        }
        if (ctx->enc_packmode == '@') {
            /* native packing: pad fmt_offset up to the type's alignment */
            size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
            size_t align_mod_offset;
            if (align_at == 0) return -1;
            align_mod_offset = ctx->fmt_offset % align_at;
            if (align_mod_offset >
0) ctx->fmt_offset += align_at - align_mod_offset; } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { /* special case -- treat as struct rather than complex number */ size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } __Pyx_BufFmt_RaiseExpected(ctx); return -1; } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; --ctx->enc_count; /* Consume from buffer string */ /* Done checking, move to next field, pushing or popping struct stack if needed */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if 
(!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; } break; case '}': /* end of substruct; either repeat or move on */ ++ts; return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { /* Continue pooling same type */ ctx->enc_count += ctx->new_count; } else { /* New type */ if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; default: { int number = __Pyx_BufFmt_ParseNumber(&ts); if (number == -1) { /* First char was not 
a digit */ PyErr_Format(PyExc_ValueError, "Does not understand character buffer dtype format string ('%c')", *ts); return NULL; } ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { /* cause is unused */ Py_XINCREF(type); Py_XINCREF(value); Py_XINCREF(tb); /* First, check the traceback argument, replacing None with NULL. 
*/ if (tb == Py_None) { Py_DECREF(tb); tb = 0; } else if (tb != NULL && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } /* Next, replace a missing value with None */ if (value == NULL) { value = Py_None; Py_INCREF(value); } #if PY_VERSION_HEX < 0x02050000 if (!PyClass_Check(type)) #else if (!PyType_Check(type)) #endif { /* Raising an instance. The value should be a dummy. */ if (value != Py_None) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } /* Normalize to raise <class>, <instance> */ Py_DECREF(value); value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (!PyExceptionClass_Check(type)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject 
*fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } if (!value) { value = PyObject_CallObject(type, NULL); } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: return; } #endif static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", index, (index == 1) ? "" : "s"); } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { if (t == Py_None) { __Pyx_RaiseNoneNotIterableError(); } else if (PyTuple_GET_SIZE(t) < index) { __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); } else { __Pyx_RaiseTooManyValuesError(index); } } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags); else { PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject* obj = 
view->obj; if (obj) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;} #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view); Py_DECREF(obj); view->obj = NULL; } } #endif static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { 
return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: 
z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { 
__pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { 
long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if 
(is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return 
(PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if 
(unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; #if PY_MAJOR_VERSION < 3 py_name = PyString_FromString(class_name); #else py_name = PyUnicode_FromString(class_name); #endif if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, 
class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; #if PY_MAJOR_VERSION < 3 py_name = PyString_FromString(name); #else py_name = PyUnicode_FromString(name); #endif if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #include "compile.h" #include "frameobject.h" #include "traceback.h" static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, int __pyx_lineno, const char *__pyx_filename) { PyObject *py_srcfile = 0; PyObject *py_funcname = 0; PyObject *py_globals = 0; PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(__pyx_filename); #else py_srcfile = PyUnicode_FromString(__pyx_filename); #endif if (!py_srcfile) goto bad; if (__pyx_clineno) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_code = PyCode_New( 0, /*int argcount,*/ #if PY_MAJOR_VERSION >= 3 0, /*int kwonlyargcount,*/ #endif 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int 
flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ __pyx_lineno, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); if (!py_code) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = __pyx_lineno; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if 
PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */
GB_Matrix_diag.c
//------------------------------------------------------------------------------
// GB_Matrix_diag: construct a diagonal matrix from a vector
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Builds C = diag (V_in, k): entry V_in(i) lands on the kth diagonal of C.
// C must be square, n-by-n, with n = length(V_in) + |k|.  On success C is
// sparse (k == 0 and V full) or hypersparse, with exactly one entry per
// non-empty vector.

#define GB_FREE_WORKSPACE   \
    GB_phbix_free (T) ;

#define GB_FREE_ALL         \
    GB_FREE_WORKSPACE ;     \
    GB_phbix_free (C) ;

#include "GB_diag.h"

GrB_Info GB_Matrix_diag     // construct a diagonal matrix from a vector
(
    GrB_Matrix C,           // output matrix
    const GrB_Matrix V_in,  // input vector (as an n-by-1 matrix)
    int64_t k,              // diagonal index: 0 main, >0 super-, <0 sub-diagonal
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT_MATRIX_OK (C, "C input for GB_Matrix_diag", GB0) ;
    ASSERT_MATRIX_OK (V_in, "V input for GB_Matrix_diag", GB0) ;
    ASSERT (GB_VECTOR_OK (V_in)) ;          // V_in is a vector on input
    ASSERT (!GB_aliased (C, V_in)) ;        // C and V_in cannot be aliased
    ASSERT (!GB_IS_HYPERSPARSE (V_in)) ;    // vectors cannot be hypersparse

    // T is workspace with a static header; freed by GB_FREE_WORKSPACE
    struct GB_Matrix_opaque T_header ;
    GrB_Matrix T = GB_clear_static_header (&T_header) ;

    GrB_Type ctype = C->type ;
    GrB_Type vtype = V_in->type ;
    int64_t nrows = GB_NROWS (C) ;
    int64_t ncols = GB_NCOLS (C) ;
    int64_t n = V_in->vlen + GB_IABS (k) ;      // C must be n-by-n

    if (nrows != ncols || nrows != n)
    {
        GB_ERROR (GrB_DIMENSION_MISMATCH,
            "Input matrix is " GBd "-by-" GBd " but must be "
            GBd "-by-" GBd "\n", nrows, ncols, n, n) ;
    }

    if (!GB_Type_compatible (ctype, vtype))
    {
        GB_ERROR (GrB_DOMAIN_MISMATCH, "Input vector of type [%s] "
            "cannot be typecast to output of type [%s]\n",
            vtype->name, ctype->name) ;
    }

    //--------------------------------------------------------------------------
    // finish any pending work in V_in and clear the output matrix C
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT (V_in) ;
    GB_phbix_free (C) ;

    //--------------------------------------------------------------------------
    // ensure V is not bitmap
    //--------------------------------------------------------------------------

    // the construction below needs Vi (a sparse/full index list), so a bitmap
    // input is first converted to sparse in the workspace T
    GrB_Matrix V ;
    if (GB_IS_BITMAP (V_in))
    {
        // make a deep copy of V_in and convert to CSC
        // set T->iso = V_in->iso   OK
        GB_OK (GB_dup_worker (&T, V_in->iso, V_in, true, NULL, Context)) ;
        GB_OK (GB_convert_bitmap_to_sparse (T, Context)) ;
        V = T ;
    }
    else
    {
        // use V_in as-is
        V = V_in ;
    }

    //--------------------------------------------------------------------------
    // allocate C as sparse or hypersparse with vnz entries and vnz vectors
    //--------------------------------------------------------------------------

    // C is sparse if V is dense and k == 0, and hypersparse otherwise
    const int64_t vnz = GB_nnz (V) ;
    const bool V_is_full = GB_is_dense (V) ;
    const int C_sparsity = (V_is_full && k == 0) ? GxB_SPARSE : GxB_HYPERSPARSE;
    const bool C_iso = V->iso ;
    if (C_iso)
    {
        GBURBLE ("(iso diag) ") ;
    }
    // preserve C's settings across the GB_new_bix below, which resets them
    const bool csc = C->is_csc ;
    const float bitmap_switch = C->bitmap_switch ;
    const int sparsity_control = C->sparsity_control ;
    // set C->iso = C_iso   OK
    GB_OK (GB_new_bix (&C, C->static_header, // prior static or dynamic header
        ctype, n, n, GB_Ap_malloc, csc, C_sparsity, false,
        C->hyper_switch, vnz, vnz, true, C_iso, Context)) ;
    C->sparsity_control = sparsity_control ;
    C->bitmap_switch = bitmap_switch ;

    //--------------------------------------------------------------------------
    // handle the CSR/CSC format of C and determine position of diagonal
    //--------------------------------------------------------------------------

    if (!csc)
    {
        // The kth diagonal of a CSC matrix is the same as the (-k)th diagonal
        // of the CSR format, so if C is CSR, negate the value of k.  Then
        // treat C as if it were CSC in the rest of this method.
        k = -k ;
    }

    // split k into its positive and negative parts: the column (vector) index
    // is offset by kpositive and the row index by knegative
    int64_t kpositive, knegative ;
    if (k >= 0)
    {
        kpositive = k ;
        knegative = 0 ;
    }
    else
    {
        kpositive = 0 ;
        knegative = -k ;
    }

    //--------------------------------------------------------------------------
    // get the contents of C and determine # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (vnz, chunk, nthreads_max) ;
    int64_t *restrict Cp = C->p ;
    int64_t *restrict Ch = C->h ;
    int64_t *restrict Ci = C->i ;
    GB_Type_code vcode = vtype->code ;
    GB_Type_code ccode = ctype->code ;
    size_t vsize = vtype->size ;

    //--------------------------------------------------------------------------
    // copy the contents of V into the kth diagonal of C
    //--------------------------------------------------------------------------

    if (C_sparsity == GxB_SPARSE)
    {

        //----------------------------------------------------------------------
        // V is full, or can be treated as full, and k == 0
        //----------------------------------------------------------------------

        // C->x = (ctype) V->x
        GB_cast_matrix (C, V, Context) ;

        // construct Cp and Ci: entry p is C(p,p)
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < vnz ; p++)
        {
            Cp [p] = p ;
            Ci [p] = p ;
        }

    }
    else if (V_is_full)
    {

        //----------------------------------------------------------------------
        // V is full, or can be treated as full, and k != 0
        //----------------------------------------------------------------------

        // C->x = (ctype) V->x
        GB_cast_matrix (C, V, Context) ;

        // construct Cp, Ch, and Ci: entry p is C(p+knegative, p+kpositive)
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < vnz ; p++)
        {
            Cp [p] = p ;
            Ch [p] = p + kpositive ;
            Ci [p] = p + knegative ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // V is sparse
        //----------------------------------------------------------------------

        // C->x = (ctype) V->x
        GB_cast_matrix (C, V, Context) ;
        int64_t *restrict Vi = V->i ;

        // construct Cp, Ch, and Ci: entry Vi[p] of V becomes
        // C(Vi[p]+knegative, Vi[p]+kpositive)
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < vnz ; p++)
        {
            Cp [p] = p ;
            Ch [p] = Vi [p] + kpositive ;
            Ci [p] = Vi [p] + knegative ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix C
    //--------------------------------------------------------------------------

    // each of the vnz vectors of C holds exactly one entry (Cp [p] = p above)
    Cp [vnz] = vnz ;
    C->nvec = vnz ;
    C->nvec_nonempty = vnz ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // free workspace, conform C to its desired format, and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    ASSERT_MATRIX_OK (C, "C before conform for GB_Matrix_diag", GB0) ;
    GB_OK (GB_conform (C, Context)) ;
    ASSERT_MATRIX_OK (C, "C output for GB_Matrix_diag", GB0) ;
    return (GrB_SUCCESS) ;
}
omp_task_red_taskloop.c
// RUN: %libomp-compile-and-run // Parsing error until gcc8: // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 // Parsing error until clang11: // UNSUPPORTED: clang-10, clang-9, clang-8, clang-7 // Missing GOMP_taskgroup_reduction_(un)register in LLVM/OpenMP // Should be removed once the functions are implemented // XFAIL: gcc-9, gcc-10 #include <stdio.h> #include <omp.h> int r; int work(int k, int l) { return k + l + 1; } void bar(int i) { #pragma omp taskgroup task_reduction(+:r) { int th_gen = omp_get_thread_num(); #pragma omp task in_reduction(+:r) firstprivate(i, th_gen) { r += work(i, 0); printf("executing task (%d, 0), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen); } #pragma omp task in_reduction(+:r) firstprivate(i, th_gen) { r += work(i, 1); printf("executing task (%d, 1), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen); } } } int foo() { int i; int th_gen = omp_get_thread_num(); #pragma omp taskgroup task_reduction(+:r) { bar(0); } printf("th %d passed bar0\n", th_gen); #pragma omp taskloop reduction(+:r) firstprivate(th_gen) for (i = 1; i < 4; ++i) { bar(i); printf("th %d (gen by th %d) passed bar%d in taskloop\n", omp_get_thread_num(), th_gen, i); #pragma omp task in_reduction(+:r) r += i; } return 0; } // res = ((1+2)+(2+3)+(3+4)+(4+5)+1+2+3) = 30 #define res 30 int main() { r = 0; #pragma omp parallel num_threads(2) { // barrier ensures threads have started before tasks creation #pragma omp barrier // single ensures no race condition between taskgroup reductions #pragma omp single nowait foo(); } if (r == res) { return 0; } else { printf("error r = %d (!= %d)\n", r, res); return 1; } }
mm_v1.c
/*
 * Assignment2 (CSE436)
 * Kazumi Malhan
 * 06/08/2016
 *
 * Matrix-matrix multiply C[N][M] = A[N][K] * B[K][M]: a serial baseline plus
 * several OpenMP decompositions (manual row / column / block partitioning,
 * and worksharing "omp for" variants).
 *
 * Fixed in review:
 *  - <omp.h> is now included (guarded) so omp_set_num_threads and
 *    omp_get_thread_num are properly declared in OpenMP builds.
 *  - Manual decompositions no longer require N and M to be divisible by
 *    num_tasks: the last task takes the remainder.
 *  - MFLOPS arithmetic is done in double to avoid int overflow of M*N*K.
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
#ifdef _OPENMP
#include <omp.h>   /* omp_set_num_threads, omp_get_thread_num */
#endif

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float
#define VECTOR_LENGTH 512

/* initialize a vector with random floating point numbers */
void init(REAL A[], int N) {
    int i;
    for (i = 0; i < N; i++) {
        //A[i] = (double) drand48();
        A[i] = i;
    }
}

/* Function Prototypes */
void mm(int N, int K, int M, REAL * A, REAL * B, REAL * C);
void mm_parallel_row(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_col(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_rowcol(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_for_row(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_for_col(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_for_rowcol(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);

/**
 * To compile: gcc mm.c -fopenmp -o mm
 */
int main(int argc, char *argv[]) {
    int N = VECTOR_LENGTH;
    int M = N;
    int K = N;
    int num_tasks = 4;

    if (argc < 5) {
        fprintf(stderr, "Usage: mm [<N(%d)>] <K(%d) [<M(%d)>] [<#tasks(%d)>]\n", N,K,M,num_tasks);
        fprintf(stderr, "\t Example: ./mm %d %d %d %d\n", N,K,M,num_tasks);
    } else {
        N = atoi(argv[1]);
        K = atoi(argv[2]);
        M = atoi(argv[3]);
        num_tasks = atoi(argv[4]);
    }
    printf("\tC[%d][%d] = A[%d][%d] * B[%d][%d] with %d tasks\n", N, M, N, K, K, M, num_tasks);

    REAL * A = (REAL *) malloc(sizeof(REAL)*N*K);
    REAL * B = (REAL *) malloc(sizeof(REAL)*K*M);
    REAL * C = (REAL *) malloc(sizeof(REAL)*N*M);
    srand48((1 << 12));
    init(A, N*K);
    init(B, K*M);

    /* Serial program */
    double elapsed_mm = read_timer();
    mm(N, K, M, A, B, C);
    elapsed_mm = (read_timer() - elapsed_mm);

    /* Parallel program */
    double elapsed_mm_parallel_row = read_timer();
    mm_parallel_row(N, K, M, A, B, C, num_tasks);
    elapsed_mm_parallel_row = (read_timer() - elapsed_mm_parallel_row);

    double elapsed_mm_parallel_col = read_timer();
    mm_parallel_col(N, K, M, A, B, C, num_tasks);
    elapsed_mm_parallel_col = (read_timer() - elapsed_mm_parallel_col);

    double elapsed_mm_parallel_rowcol = read_timer();
    mm_parallel_rowcol(N, K, M, A, B, C, num_tasks);
    elapsed_mm_parallel_rowcol = (read_timer() - elapsed_mm_parallel_rowcol);

    /* Parallel for program */
    double elapsed_mm_parallel_for_row = read_timer();
    mm_parallel_for_row(N, K, M, A, B, C, num_tasks);
    elapsed_mm_parallel_for_row = (read_timer() - elapsed_mm_parallel_for_row);

    double elapsed_mm_parallel_for_col = read_timer();
    mm_parallel_for_col(N, K, M, A, B, C, num_tasks);
    elapsed_mm_parallel_for_col = (read_timer() - elapsed_mm_parallel_for_col);

    double elapsed_mm_parallel_for_rowcol = read_timer();
    mm_parallel_for_rowcol(N, K, M, A, B, C, num_tasks);
    elapsed_mm_parallel_for_rowcol = (read_timer() - elapsed_mm_parallel_for_rowcol);

    /* flops = M*N*K, promoted to double FIRST so large sizes do not overflow int */
    double flops = (double) M * N * K;

    printf("======================================================================================================\n");
    printf("\tC[%d][%d] = A[%d][%d] * B[%d][%d] with %d tasks\n", N, M, N, K, K, M, num_tasks);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\t\t\tRuntime (ms)\t MFLOPS \n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("mm:\t\t%4f\t%4f\n", elapsed_mm * 1.0e3, flops / (1.0e6 * elapsed_mm));
    printf("mm_parallel_row:\t\t%4f\t%4f\n", elapsed_mm_parallel_row * 1.0e3, flops / (1.0e6 * elapsed_mm_parallel_row));
    printf("mm_parallel_col:\t\t%4f\t%4f\n", elapsed_mm_parallel_col * 1.0e3, flops / (1.0e6 * elapsed_mm_parallel_col));
    printf("mm_parallel_rowcol:\t\t%4f\t%4f\n", elapsed_mm_parallel_rowcol * 1.0e3, flops / (1.0e6 * elapsed_mm_parallel_rowcol));
    printf("mm_parallel_for_row:\t\t%4f\t%4f\n", elapsed_mm_parallel_for_row * 1.0e3, flops / (1.0e6 * elapsed_mm_parallel_for_row));
    printf("mm_parallel_for_col:\t\t%4f\t%4f\n", elapsed_mm_parallel_for_col * 1.0e3, flops / (1.0e6 * elapsed_mm_parallel_for_col));
    printf("mm_parallel_for_rowcol:\t\t%4f\t%4f\n", elapsed_mm_parallel_for_rowcol * 1.0e3, flops / (1.0e6 * elapsed_mm_parallel_for_rowcol));

    free(A);
    free(B);
    free(C);
    return 0;
}

/* Serial reference: C[i][j] = sum_w A[i][w] * B[w][j] */
void mm(int N, int K, int M, REAL * A, REAL * B, REAL * C) {
    int i, j, w;
    for (i=0; i<N; i++)
        for (j=0; j<M; j++) {
            REAL temp = 0.0;
            for (w=0; w<K; w++)
                temp += A[i*K+w]*B[w*M+j];
            C[i*M+j] = temp;
        }
}

/* Parallel Row: each task computes a contiguous band of rows.
 * The last task also takes the remainder so N need not divide evenly. */
void mm_parallel_row(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int i, j, w;
    omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
    {
        int tid, istart, iend;
        tid = omp_get_thread_num();
        istart = tid * (N / num_tasks);
        iend = (tid == num_tasks - 1) ? N : (tid + 1) * (N / num_tasks);
        for (i=istart; i<iend; i++) { /* decompose this loop */
            for (j=0; j<M; j++) {
                REAL temp = 0.0;
                for (w=0; w<K; w++)
                    temp += A[i*K+w]*B[w*M+j];
                C[i*M+j] = temp;
            }
        }
    }/* end of parallel */
}

/* Parallel Column: each task computes a contiguous band of columns.
 * The last task also takes the remainder so M need not divide evenly. */
void mm_parallel_col(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int i, j, w;
    omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
    {
        int tid, jstart, jend;
        tid = omp_get_thread_num();
        jstart = tid * (M / num_tasks);
        jend = (tid == num_tasks - 1) ? M : (tid + 1) * (M / num_tasks);
        for (i=0; i<N; i++) {
            for (j=jstart; j<jend; j++) { /* decompose this loop */
                REAL temp = 0.0;
                for (w=0; w<K; w++)
                    temp += A[i*K+w]*B[w*M+j];
                C[i*M+j] = temp;
            }
        }
    } /* end of parallel */
}

/* Parallel Row Column: task tid computes the (tid,tid) diagonal block.
 * NOTE(review): as in the original, only the block-diagonal of C is computed
 * here (each task's row band is paired with its own column band) -- confirm
 * this matches the assignment's intent.  Remainder rows/cols go to the last
 * task so N and M need not divide evenly. */
void mm_parallel_rowcol(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int i, j, w;
    omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
    {
        int tid, istart, jstart, iend, jend;
        tid = omp_get_thread_num();
        istart = tid * (N / num_tasks);
        iend = (tid == num_tasks - 1) ? N : (tid + 1) * (N / num_tasks);
        jstart = tid * (M / num_tasks);
        jend = (tid == num_tasks - 1) ? M : (tid + 1) * (M / num_tasks);
        for (i=istart; i<iend; i++) { /* decompose this loop */
            for (j=jstart; j<jend; j++) { /* decompose this loop */
                REAL temp = 0.0;
                for (w=0; w<K; w++)
                    temp += A[i*K+w]*B[w*M+j];
                C[i*M+j] = temp;
            }
        }
    } /* end of parallel */
}

/* Parallel For Row: let the worksharing construct split the row loop
 * (handles any N, including remainders). */
void mm_parallel_for_row(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int i, j, w;
    omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
    {
#pragma omp for schedule(static) nowait
        for (i=0; i<N; i++) {
            for (j=0; j<M; j++) {
                REAL temp = 0.0;
                for (w=0; w<K; w++)
                    temp += A[i*K+w]*B[w*M+j];
                C[i*M+j] = temp;
            }
        }
    } /* end of parallel */
}

/* Parallel For Column: worksharing on the inner column loop; every thread
 * iterates the row loop and shares each row's columns. */
void mm_parallel_for_col(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int i, j, w;
    omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
    {
        for (i=0; i<N; i++) {
#pragma omp for schedule(static) nowait
            for (j=0; j<M; j++) {
                REAL temp = 0.0;
                for (w=0; w<K; w++)
                    temp += A[i*K+w]*B[w*M+j];
                C[i*M+j] = temp;
            }
        }
    } /* end of parallel */
}

/* Parallel For Row Column: collapse(2) distributes the full N*M iteration
 * space over the threads. */
void mm_parallel_for_rowcol(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int i, j, w;
    omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
    {
#pragma omp for collapse(2) schedule(static) nowait
        for (i=0; i<N; i++) {
            for (j=0; j<M; j++) {
                REAL temp = 0.0;
                for (w=0; w<K; w++)
                    temp += A[i*K+w]*B[w*M+j];
                C[i*M+j] = temp;
            }
        }
    } /* end of parallel */
}
GB_add_template.c
//------------------------------------------------------------------------------
// GB_add_template: phase1 and phase2 for C=A+B, C<M>=A+B
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Computes C=A+B (no mask) or C<M>=A+B (mask present and not complemented).
// Does not handle the case C<!M>=A+B.  The complemented mask is handled in
// GB_mask instead.  If present, the mask M is assumed to be very sparse
// compared with A and B.

// phase1: does not compute C itself, but just counts the # of entries in each
// vector of C.  Fine tasks compute the # of entries in their slice of a
// single vector of C, and the results are cumsum'd in GB_task_cumsum.

// phase2: computes C, using the counts computed by phase1.

// This template is compiled twice per operator, with GB_PHASE_1_OF_2 or
// GB_PHASE_2_OF_2 defined; the enclosing file supplies A, B, M, C, TaskList,
// ntasks, nthreads, C_to_A, C_to_B, C_to_M, Ch_is_Mh, and the GB_* macros.

{

    //--------------------------------------------------------------------------
    // get A, B, M, and C
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ai = A->i ;
    const int64_t vlen = A->vlen ;

    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bi = B->i ;

    const int64_t *restrict Mp = NULL ;
    // const int64_t *restrict Mh = NULL ;
    const int64_t *restrict Mi = NULL ;
    const GB_void *restrict Mx = NULL ;
    GB_cast_function cast_M = NULL ;
    size_t msize = 0 ;
    if (M != NULL)
    {
        Mp = M->p ;
        // Mh = M->h ;
        Mi = M->i ;
        Mx = M->x ;
        // cast_M converts each mask entry to bool
        cast_M = GB_cast_factory (GB_BOOL_code, M->type->code) ;
        msize = M->type->size ;
    }

    #if defined ( GB_PHASE_2_OF_2 )
    const GB_ATYPE *restrict Ax = A->x ;
    // NOTE(review): Bx is declared with GB_ATYPE here; upstream GraphBLAS
    // uses GB_BTYPE -- confirm A and B have the same type in this build.
    const GB_ATYPE *restrict Bx = B->x ;
    const int64_t *restrict Cp = C->p ;
    const int64_t *restrict Ch = C->h ;
    int64_t *restrict Ci = C->i ;
    GB_CTYPE *restrict Cx = C->x ;
    #endif

    //--------------------------------------------------------------------------
    // phase1: count entries in each C(:,j); phase2: compute C
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast  = TaskList [taskid].klast ;
        bool fine_task = (klast == -1) ;
        int64_t len ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
            len = TaskList [taskid].len ;
        }
        else
        {
            // a coarse task operates on one or more whole vectors
            len = vlen ;
        }

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of C
            //------------------------------------------------------------------

            int64_t j = (Ch == NULL) ? k : Ch [k] ;

            #if defined ( GB_PHASE_1_OF_2 )
            int64_t cjnz = 0 ;
            #else
            int64_t pC, pC_end ;
            if (fine_task)
            {
                // A fine task computes a slice of C(:,j)
                pC     = TaskList [taskid ].pC ;
                pC_end = TaskList [taskid+1].pC ;
                ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
            }
            else
            {
                // The vectors of C are never sliced for a coarse task.
                pC     = Cp [k] ;
                pC_end = Cp [k+1] ;
            }
            int64_t cjnz = pC_end - pC ;
            if (cjnz == 0) continue ;
            #endif

            //------------------------------------------------------------------
            // get A(:,j)
            //------------------------------------------------------------------

            // GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, C_to_A) ;
            int64_t pA = -1, pA_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Ai,Ax [pA...pA_end-1], which is
                // a subset of the vector A(:,j)
                pA     = TaskList [taskid].pA ;
                pA_end = TaskList [taskid].pA_end ;
            }
            else
            {
                // A coarse task operates on the entire vector A (:,j)
                int64_t kA = (C_to_A == NULL) ? j : C_to_A [k] ;
                if (kA >= 0)
                {
                    pA     = Ap [kA] ;
                    pA_end = Ap [kA+1] ;
                }
            }
            // ----

            int64_t ajnz = pA_end - pA ;    // nnz in A(:,j) for this slice
            bool adense = (ajnz == len) ;   // A(:,j) is dense within this slice
            int64_t iA_first = -1, iA_last = -1 ;
            if (ajnz > 0)
            {
                // get the first and last indices in A(:,j) for this vector
                iA_first = Ai [pA] ;
                iA_last  = Ai [pA_end-1] ;
            }

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            // GB_GET_MAPPED_VECTOR (pB, pB_end, pB, pB_end, Bp, j, k, C_to_B) ;
            int64_t pB = -1, pB_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Bi,Bx [pB...pB_end-1], which is
                // a subset of the vector B(:,j)
                pB     = TaskList [taskid].pB ;
                pB_end = TaskList [taskid].pB_end ;
            }
            else
            {
                // A coarse task operates on the entire vector B (:,j)
                int64_t kB = (C_to_B == NULL) ? j : C_to_B [k] ;
                if (kB >= 0)
                {
                    pB     = Bp [kB] ;
                    pB_end = Bp [kB+1] ;
                }
            }
            // ----

            int64_t bjnz = pB_end - pB ;    // nnz in B(:,j) for this slice
            bool bdense = (bjnz == len) ;   // B(:,j) is dense within this slice
            int64_t iB_first = -1, iB_last = -1 ;
            if (bjnz > 0)
            {
                // get the first and last indices in B(:,j) for this vector
                iB_first = Bi [pB] ;
                iB_last  = Bi [pB_end-1] ;
            }

            //------------------------------------------------------------------
            // phase1: count nnz (C (:,j)); phase2: compute C(:,j)
            //------------------------------------------------------------------

            if (M == NULL)
            {

                //--------------------------------------------------------------
                // No mask
                //--------------------------------------------------------------

                // if present, M(:,j) is ignored since !M(:,j) is all true

                #if defined ( GB_PHASE_1_OF_2 )
                if (A_and_B_are_disjoint)
                {
                    // only used by GB_wait, which computes A+T where T is the
                    // matrix of pending tuples for A.  The pattern of pending
                    // tuples is always disjoint with the pattern of A.
                    cjnz = ajnz + bjnz ;
                }
                else
                #endif
                if (adense && bdense)
                {

                    //----------------------------------------------------------
                    // A(:,j) and B(:,j) dense: thus C(:,j) dense
                    //----------------------------------------------------------

                    ASSERT (ajnz == bjnz) ;
                    ASSERT (iA_first == iB_first) ;
                    ASSERT (iA_last  == iB_last ) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        Ci [pC + p] = p + iA_first ;
                        GB_GETA (aij, Ax, pA + p) ;
                        GB_GETB (bij, Bx, pB + p) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij) ;
                    }
                    #endif

                }
                else if (adense)
                {

                    //----------------------------------------------------------
                    // A(:,j) dense, B(:,j) sparse: thus C(:,j) dense
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    // copy all of A(:,j), then add B(:,j) into place
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        Ci [pC + p] = p + iA_first ;
                        GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p) ;
                    }
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        int64_t ii = Bi [pB + p] - iA_first ;
                        GB_GETA (aij, Ax, pA + ii) ;
                        GB_GETB (bij, Bx, pB + p) ;
                        GB_BINOP (GB_CX (pC + ii), aij, bij) ;
                    }
                    #endif

                }
                else if (bdense)
                {

                    //----------------------------------------------------------
                    // A(:,j) sparse, B(:,j) dense: thus C(:,j) dense
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = bjnz ;
                    #else
                    ASSERT (cjnz == bjnz) ;
                    // copy all of B(:,j), then add A(:,j) into place
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        Ci [pC + p] = p + iB_first ;
                        GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p) ;
                    }
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        int64_t ii = Ai [pA + p] - iB_first ;
                        GB_GETA (aij, Ax, pA + p) ;
                        GB_GETB (bij, Bx, pB + ii) ;
                        GB_BINOP (GB_CX (pC + ii), aij, bij) ;
                    }
                    #endif

                }
                else if (ajnz == 0)
                {

                    //----------------------------------------------------------
                    // A(:,j) is empty
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = bjnz ;
                    #else
                    ASSERT (cjnz == bjnz) ;
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        Ci [pC + p] = Bi [pB + p] ;
                        GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p) ;
                    }
                    #endif

                }
                else if (bjnz == 0)
                {

                    //----------------------------------------------------------
                    // B(:,j) is empty
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        Ci [pC + p] = Ai [pA + p] ;
                        GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p) ;
                    }
                    #endif

                }
                else if (iA_last < iB_first)
                {

                    //----------------------------------------------------------
                    // last entry of A(:,j) comes before first entry of B(:,j)
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz + bjnz ;
                    #else
                    ASSERT (cjnz == ajnz + bjnz) ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        Ci [pC + p] = Ai [pA + p] ;
                        GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p) ;
                    }
                    pC += ajnz ;
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        Ci [pC + p] = Bi [pB + p] ;
                        GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p) ;
                    }
                    #endif

                }
                else if (iB_last < iA_first)
                {

                    //----------------------------------------------------------
                    // last entry of B(:,j) comes before first entry of A(:,j)
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz + bjnz ;
                    #else
                    ASSERT (cjnz == ajnz + bjnz) ;
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        Ci [pC + p] = Bi [pB + p] ;
                        GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p) ;
                    }
                    pC += bjnz ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        Ci [pC + p] = Ai [pA + p] ;
                        GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p) ;
                    }
                    #endif

                }

                #if defined ( GB_PHASE_1_OF_2 )
                else if (ajnz > 32 * bjnz)
                {

                    //----------------------------------------------------------
                    // A(:,j) is much denser than B(:,j)
                    //----------------------------------------------------------

                    // cjnz = ajnz + bjnz - nnz in the intersection; count the
                    // intersection by binary-searching A for each entry of B

                    cjnz = ajnz + bjnz ;
                    for ( ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        // find i in A(:,j)
                        int64_t pright = pA_end - 1 ;
                        bool found ;
                        GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
                        if (found) cjnz-- ;
                    }

                }
                else if (bjnz > 32 * ajnz)
                {

                    //----------------------------------------------------------
                    // B(:,j) is much denser than A(:,j)
                    //----------------------------------------------------------

                    // cjnz = ajnz + bjnz - nnz in the intersection

                    cjnz = ajnz + bjnz ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        // find i in B(:,j)
                        int64_t pright = pB_end - 1 ;
                        bool found ;
                        GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
                        if (found) cjnz-- ;
                    }

                }
                #endif

                else
                {

                    //----------------------------------------------------------
                    // A(:,j) and B(:,j) have about the same # of entries
                    //----------------------------------------------------------

                    // classic sorted-list merge of the two index lists
                    while (pA < pA_end && pB < pB_end)
                    {
                        int64_t iA = Ai [pA] ;
                        int64_t iB = Bi [pB] ;
                        if (iA < iB)
                        {
                            // C (iA,j) = A (iA,j)
                            #if defined ( GB_PHASE_2_OF_2 )
                            Ci [pC] = iA ;
                            GB_COPY_A_TO_C (GB_CX (pC), Ax, pA) ;
                            #endif
                            pA++ ;
                        }
                        else if (iA > iB)
                        {
                            // C (iB,j) = B (iB,j)
                            #if defined ( GB_PHASE_2_OF_2 )
                            Ci [pC] = iB ;
                            GB_COPY_B_TO_C (GB_CX (pC), Bx, pB) ;
                            #endif
                            pB++ ;
                        }
                        else
                        {
                            // C (i,j) = A (i,j) + B (i,j)
                            #if defined ( GB_PHASE_2_OF_2 )
                            Ci [pC] = iB ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij) ;
                            #endif
                            pA++ ;
                            pB++ ;
                        }
                        #if defined ( GB_PHASE_2_OF_2 )
                        pC++ ;
                        #else
                        cjnz++ ;
                        #endif
                    }

                    //----------------------------------------------------------
                    // A (:,j) or B (:,j) have entries left; not both
                    //----------------------------------------------------------

                    ajnz = (pA_end - pA) ;
                    bjnz = (pB_end - pB) ;
                    ASSERT (ajnz == 0 || bjnz == 0) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz += ajnz + bjnz ;
                    #else
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        // C (i,j) = A (i,j)
                        Ci [pC + p] = Ai [pA + p] ;
                        GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p) ;
                    }
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        // C (i,j) = B (i,j)
                        Ci [pC + p] = Bi [pB + p] ;
                        GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p) ;
                    }
                    ASSERT (pC + ajnz + bjnz == pC_end) ;
                    #endif
                }

            }
            else
            {

                //--------------------------------------------------------------
                // Mask is present
                //--------------------------------------------------------------

                int64_t pM = -1 ;
                int64_t pM_end = -1 ;
                if (fine_task)
                {
                    // A fine task operates on Mi,Mx [pM...pM_end-1], which is
                    // a subset of the vector M(:,j)
                    pM     = TaskList [taskid].pM ;
                    pM_end = TaskList [taskid].pM_end ;
                }
                else
                {
                    int64_t kM = -1 ;
                    if (Ch_is_Mh)
                    {
                        // Ch is the same as Mh (a deep copy)
                        ASSERT (Ch != NULL) ;
                        ASSERT (M->h != NULL) ;
                        ASSERT (Ch [k] == M->h [k]) ;
                        kM = k ;
                    }
                    else
                    {
                        kM = (C_to_M == NULL) ? j : C_to_M [k] ;
                    }
                    if (kM >= 0)
                    {
                        pM     = Mp [kM] ;
                        pM_end = Mp [kM+1] ;
                    }
                }

                //--------------------------------------------------------------
                // C(:,j)<M(:,j)> = A(:,j) + B (:,j)
                //--------------------------------------------------------------

                // M is assumed very sparse, so iterate over M(:,j) and binary
                // search A(:,j) and B(:,j) for each entry where M(i,j) is true
                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // get M(i,j) for A(i,j) + B (i,j)
                    //----------------------------------------------------------

                    int64_t i = Mi [pM] ;
                    bool mij ;
                    cast_M (&mij, Mx +(pM*msize), 0) ;
                    if (!mij) continue ;

                    //----------------------------------------------------------
                    // get A(i,j)
                    //----------------------------------------------------------

                    int64_t apright = pA_end - 1 ;
                    bool afound ;
                    GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;

                    //----------------------------------------------------------
                    // get B(i,j)
                    //----------------------------------------------------------

                    int64_t bpright = pB_end - 1 ;
                    bool bfound ;
                    GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;

                    //----------------------------------------------------------
                    // C(i,j) = A(i,j) + B(i,j)
                    //----------------------------------------------------------

                    if (afound && bfound)
                    {
                        // C (i,j) = A (i,j) + B (i,j)
                        #if defined ( GB_PHASE_1_OF_2 )
                        cjnz++ ;
                        #else
                        Ci [pC] = i ;
                        GB_GETA (aij, Ax, pA) ;
                        GB_GETB (bij, Bx, pB) ;
                        GB_BINOP (GB_CX (pC), aij, bij) ;
                        pC++ ;
                        #endif
                    }
                    else if (afound)
                    {
                        // C (i,j) = A (i,j)
                        #if defined ( GB_PHASE_1_OF_2 )
                        cjnz++ ;
                        #else
                        Ci [pC] = i ;
                        GB_COPY_A_TO_C (GB_CX (pC), Ax, pA) ;
                        pC++ ;
                        #endif
                    }
                    else if (bfound)
                    {
                        // C (i,j) = B (i,j)
                        #if defined ( GB_PHASE_1_OF_2 )
                        cjnz++ ;
                        #else
                        Ci [pC] = i ;
                        GB_COPY_B_TO_C (GB_CX (pC), Bx, pB) ;
                        pC++ ;
                        #endif
                    }
                }

                #if defined ( GB_PHASE_2_OF_2 )
                ASSERT (pC == pC_end) ;
                #endif
            }

            //------------------------------------------------------------------
            // final count of nnz (C (:,j))
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (fine_task)
            {
                TaskList [taskid].pC = cjnz ;
            }
            else
            {
                Cp [k] = cjnz ;
            }
            #endif
        }
    }
}
trmv_x_dia_u_lo.c
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/**
 * Triangular matrix-vector multiply for a DIA-format matrix treated as
 * UNIT-diagonal LOWER-triangular:
 *
 *     y := alpha * (L + I) * x + beta * y
 *
 * Only diagonals with distance < 0 (strictly below the main diagonal)
 * contribute stored values; the unit main diagonal contributes
 * alpha * x[i] directly, and diagonals with distance >= 0 are ignored.
 *
 * Parallelization: each thread accumulates its diagonals into a private
 * length-m buffer (tmp[threadId]) so no atomics are needed; a second
 * parallel pass reduces the per-thread buffers into y.
 *
 * Returns ALPHA_SPARSE_STATUS_INVALID_VALUE for non-square matrices,
 * ALPHA_SPARSE_STATUS_SUCCESS otherwise.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_DIA* A,
                                      const ALPHA_Number* x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number* y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    // Triangular kernels are only defined for square matrices.
    if (m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT thread_num = alpha_get_thread_num();

    // One zero-initialized private accumulator of length m per thread.
    // NOTE(review): allocated with plain malloc but released with alpha_free
    // below — presumably alpha_free wraps free; confirm against alphasparse/util.h.
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
    for (int i = 0; i < thread_num; ++i)
    {
        tmp[i] = malloc(sizeof(ALPHA_Number) * m);
        memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
    }

    const ALPHA_INT diags = A->ndiag;

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < diags; ++i)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT dis = A->distance[i];
        // Strictly-lower diagonals only; dis >= 0 belongs to the (implicit
        // unit) upper part and is skipped.
        if (dis < 0)
        {
            const ALPHA_INT row_start = -dis;
            const ALPHA_INT col_start = 0;
            const ALPHA_INT nnz = m + dis;
            // Assumes values are laid out one diagonal per stripe with
            // leading dimension lval, indexed by row — TODO confirm against
            // the DIA layout in alphasparse.
            const ALPHA_INT start = i * A->lval;
            for (ALPHA_INT j = 0; j < nnz; ++j)
            {
                ALPHA_Number v;
                alpha_mul(v, alpha, A->values[start + row_start + j]);    // v = alpha * A(row,col)
                alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
            }
        }
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);        // y[i] = beta * y[i]
        alpha_madde(y[i], alpha, x[i]);     //       + alpha * x[i]  (unit diagonal)
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);   // + per-thread partial sums
        }
    }

    // Freeing a handful of pointers is trivially cheap; the original spawned
    // an OpenMP parallel region just for this loop, which is pure overhead.
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/**
 * Public entry point: dispatches to the OpenMP implementation.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
GB_binop__bset_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bset_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__bset_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__bset_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__bset_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bset_int64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bset_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__bset_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bset_int64)
// C=scalar+B                       GB (_bind1st__bset_int64)
// C=scalar+B'                      GB (_bind1st_tran__bset_int64)
// C=A+scalar                       GB (_bind2nd__bset_int64)
// C=A'+scalar                      GB (_bind2nd_tran__bset_int64)

// C type:    int64_t
// A type:    int64_t
// A pattern? 0
// B type:    int64_t
// B pattern? 0

// BinaryOp: cij = GB_BITSET (aij, bij, int64_t, 64)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (i,j are unused by GB_BITSET)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITSET (x, y, int64_t, 64) ;

// true if the binop must be flipped (BITSET is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSET || GxB_NO_INT64 || GxB_NO_BSET_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bset_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: the return below is unreachable; it is emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bset_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bset_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bset_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bset_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap (GBB is always true when Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, int64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bset_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, int64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITSET (x, aij, int64_t, 64) ; \
}

GrB_Info GB (_bind1st_tran__bset_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITSET (aij, y, int64_t, 64) ; \
}

GrB_Info GB (_bind2nd_tran__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matvec.c
/*
 * matvec.c: Example of matrix-vector product in OpenMP.
 *
 * (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
 */

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <omp.h>
#include <sys/time.h>

/*
 * Memory consumption: O(m * n + n + m)
 * C[m] = A[m,n] * B[n]
 */
enum { m = 20000, n = 20000 };

/* xmalloc: malloc wrapper that terminates the program on allocation failure. */
void *xmalloc(size_t size)
{
    void *p = malloc(size);
    if (!p) {
        fprintf(stderr, "malloc failed\n");
        exit(EXIT_FAILURE);
    }
    return p;
}

/* wtime: wall-clock time in seconds with microsecond resolution. */
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

/* matrix_vector_product: Compute matrix-vector product c[m] = a[m][n] * b[n]
 * sequentially. */
void matrix_vector_product(double *a, double *b, double *c, int m, int n)
{
    for (int i = 0; i < m; i++) {
        c[i] = 0.0;
        for (int j = 0; j < n; j++)
            c[i] += a[i * n + j] * b[j];
    }
}

/* matrix_vector_product_omp: Compute matrix-vector product c[m] = a[m][n] * b[n]
 * in OpenMP.  Rows are divided into contiguous chunks; the last thread also
 * takes the remainder when m is not divisible by the thread count. */
void matrix_vector_product_omp(double *a, double *b, double *c, int m, int n)
{
    #pragma omp parallel
    {
        int nthreads = omp_get_num_threads();
        int threadid = omp_get_thread_num();
        int items_per_thread = m / nthreads;
        int lb = threadid * items_per_thread;
        int ub = (threadid == nthreads - 1) ? (m - 1) : (lb + items_per_thread - 1);
        for (int i = lb; i <= ub; i++) {
            c[i] = 0.0;
            for (int j = 0; j < n; j++)
                c[i] += a[i * n + j] * b[j];
        }
    }
}

/* init_data: allocate a[m*n], b[n], c[m] and fill a and b with the test
 * pattern.  Factored out of run_serial/run_parallel, which previously
 * duplicated this code verbatim. */
static void init_data(double **a, double **b, double **c)
{
    *a = xmalloc(sizeof(**a) * m * n);
    *b = xmalloc(sizeof(**b) * n);
    *c = xmalloc(sizeof(**c) * m);
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
            (*a)[i * n + j] = i + j;
    }
    for (int j = 0; j < n; j++)
        (*b)[j] = j;
}

/* run_serial: time the sequential product; returns elapsed seconds. */
double run_serial()
{
    double *a, *b, *c;
    init_data(&a, &b, &c);
    double t = wtime();
    matrix_vector_product(a, b, c, m, n);
    t = wtime() - t;
    printf("Elapsed time (serial): %.6f sec.\n", t);
    free(a);
    free(b);
    free(c);
    return t;
}

/* run_parallel: time the OpenMP product; returns elapsed seconds. */
double run_parallel()
{
    double *a, *b, *c;
    init_data(&a, &b, &c);
    double t = wtime();
    matrix_vector_product_omp(a, b, c, m, n);
    t = wtime() - t;
    printf("Elapsed time (parallel): %.6f sec.\n", t);
    free(a);
    free(b);
    free(c);
    return t;
}

int main(int argc, char **argv)
{
    printf("Matrix-vector product (c[m] = a[m, n] * b[n]; m = %d, n = %d)\n", m, n);
    printf("Memory used: %" PRIu64 " MiB\n", (uint64_t)(((double)m * n + m + n) * sizeof(double)) >> 20);

    double tser = run_serial();
    double tpar = run_parallel();
    printf("Speedup: %.2f\n", tser / tpar);

    return 0;
}
matrix_multiplication.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Dense n x n matrix multiplication C = A * B with the outer loop
 * parallelized by OpenMP.  A and B are filled with pseudo-random values in
 * [1.00, 1.09] (rand() is unseeded, so the run is deterministic); C[0] is
 * printed as a spot check.
 */
int main()
{
    int n = 2000;
    double *A = (double*) malloc(n*n * sizeof(double));
    double *B = (double*) malloc(n*n * sizeof(double));
    double *C = (double*) malloc(n*n * sizeof(double));
    int i, j, k;

    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            A[i * n + j] = 1 + (rand() % 10)/100.0;
            B[i * n + j] = 1 + (rand() % 10)/100.0;
            C[i * n + j] = 0;
        }
    }

    /* BUG FIX: the accumulation used A[i*n+k] + B[k*n+j] ('+' instead of
     * '*'), which sums entries rather than computing the matrix product
     * the file name promises. */
    #pragma omp parallel for private(i, j, k)
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            for (k = 0; k < n; k++) {
                C[i * n + j] += A[i * n + k] * B[k * n + j];
            }
        }
    }

    printf("%lf\n", C[0]);

    /* Release the matrices (previously leaked). */
    free(A);
    free(B);
    free(C);
    return 0;
}
d3.c
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
//#include <omp.h>

/* fillMatrice: allocate a matrice_size x matrice_size matrix (array of row
 * pointers) and read its entries from stdin with scanf.
 * NOTE(review): scanf return values and malloc results are unchecked. */
void fillMatrice(int*** matrice, int matrice_size){
    *matrice = malloc(matrice_size * sizeof(int*));
    for(int i=0; i<matrice_size; ++i){
        (*matrice)[i] = malloc(matrice_size * sizeof(int));
        for(int j=0; j<matrice_size; ++j){
            scanf("%d", &(*matrice)[i][j]);
        }
    }
}

/* initMatrice: allocate a matrice_size x matrice_size matrix filled with 0. */
int** initMatrice(int matrice_size){
    int** matrice = malloc(matrice_size * sizeof(int*));
    for(int i=0; i<matrice_size; ++i){
        matrice[i] = malloc(matrice_size * sizeof(int));
        for(int j=0; j<matrice_size; ++j){
            matrice[i][j] = 0;
        }
    }
    return matrice;
}

/* printMatrice: print a row-pointer matrix, 5 characters per entry. */
void printMatrice(int** matrice, int matrice_size){
    for(int i=0; i<matrice_size; ++i){
        for(int j=0; j<matrice_size; ++j){
            printf("%5d ", matrice[i][j]);
        }
        printf("\n");
    }
}

/* arrayBroadcast: broadcast a row-pointer matrix row by row from `master`
 * (one MPI_Bcast per row, since the rows are not contiguous in memory). */
void arrayBroadcast(int** a, int matrice_size, int master){
    for(int i = 0; i < matrice_size; ++i){
        MPI_Bcast(a[i], matrice_size, MPI_INT, master, MPI_COMM_WORLD);
    }
}

/* Distributed matrix multiplication: rank 0 reads the size and both input
 * matrices from stdin, computes the row-block partition (rcvcounts/displs
 * in elements, positions = exclusive end row per rank), broadcasts the
 * inputs, each rank multiplies its row block, and the result is gathered
 * on rank 0 with MPI_Gatherv.  Pass "-p" as argv[1] to print the product. */
int main (int argc, char *argv[])
{
    int id, p;
    int matrice_size, from, to;
    int** a;
    int** b;
    double elapsed_time;
    int *rcvcounts;       /* elements contributed by each rank */
    int *displs;          /* element offset of each rank's block in c */
    int *positions;       /* exclusive end row of each rank's block */

    MPI_Init(&argc, &argv);
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    rcvcounts = calloc(p, sizeof(int));
    displs = calloc(p, sizeof(int));
    positions = calloc(p, sizeof(int));

    if(id == 0){
        scanf("%d", &matrice_size);
        fillMatrice(&a, matrice_size);
        fillMatrice(&b, matrice_size);
        elapsed_time = -MPI_Wtime();
        /* NOTE(review): p is clamped to matrice_size on rank 0 only, while
         * the other ranks keep the full communicator size.  The MPI_Bcast
         * calls below then use different counts on root and non-root ranks
         * whenever p > matrice_size, and ranks beyond the clamped p work
         * from zero-filled rcvcounts/positions.  The clamp (or the final p)
         * should itself be broadcast before the count arrays are. */
        p = p>matrice_size ? matrice_size : p;
        int modulo = matrice_size % p;
        int nb = 0;
        for(int i = 0; i < p ;++i){
            /* base rows per rank, with the first `modulo` ranks taking one
             * extra row each */
            int numberElements = matrice_size/p;
            if(modulo > 0){
                ++numberElements;
                --modulo;
            }
            if(i == 0)
                positions[i] = numberElements;
            else
                positions[i] = positions[i-1] + numberElements;
            numberElements *= matrice_size;   /* rows -> elements */
            rcvcounts[i] = numberElements;
            displs[i] = nb;
            nb += numberElements;
        }
    }

    MPI_Bcast(&matrice_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(rcvcounts, p, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(displs, p, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(positions, p, MPI_INT, 0, MPI_COMM_WORLD);

    /* non-root ranks allocate zeroed matrices to receive the broadcasts */
    if(id != 0){
        a=initMatrice(matrice_size);
        b=initMatrice(matrice_size);
    }

    int *c = malloc(sizeof(int) * matrice_size * matrice_size);

    arrayBroadcast(a, matrice_size, 0);
    arrayBroadcast(b, matrice_size, 0);

    /* this rank's half-open row range [from, to) */
    to = positions[id];
    from = to - (rcvcounts[id] / matrice_size);

    // #pragma omp parallel for
    for(int i =from; i<to; ++i) {
        for(int j = 0; j<matrice_size; ++j){
            int su = 0;
            for(int k=0; k<matrice_size; ++k){
                su += a[i][k] * b[k][j];
            }
            c[i *matrice_size + j] = su;
        }
    }

    /* gather every rank's row block into c on rank 0 */
    MPI_Gatherv(&c[from * matrice_size], rcvcounts[id], MPI_INT, c, rcvcounts, displs, MPI_INT, 0, MPI_COMM_WORLD);

    if(id == 0 && argc > 1 && argv[1][0] == '-' && argv[1][1] == 'p'){
        printf("-------------\n");
        for(int i=0; i<matrice_size; ++i){
            for(int j=0; j<matrice_size; ++j){
                printf("%5d ", c[i * matrice_size + j]);
            }
            printf("\n");
        }
        printf("-------------\n");
    }

    /* NOTE(review): elapsed_time is only initialized on rank 0; on the other
     * ranks this adds MPI_Wtime() to an indeterminate value (currently
     * harmless because the result is never printed).  All matrices and the
     * count arrays are leaked before MPI_Finalize. */
    elapsed_time +=MPI_Wtime();
    //if(!id){
    //printf("%10.6f\n", elapsed_time);
    //}

    MPI_Finalize();
    return 0;
}
RecordTable.h
/* * Souffle - A Datalog Compiler * Copyright (c) 2020, The Souffle Developers. All rights reserved. * Licensed under the Universal Permissive License v 1.0 as shown at: * - https://opensource.org/licenses/UPL * - <souffle root>/licenses/SOUFFLE-UPL.txt */ /************************************************************************ * * @file RecordTable.h * * Data container implementing a map between records and their references. * Records are separated by arity, i.e., stored in different RecordMaps. * ***********************************************************************/ #pragma once #include "CompiledTuple.h" #include "ParallelUtils.h" #include "RamTypes.h" #include <cassert> #include <iostream> #include <limits> #include <map> #include <unordered_map> #include <vector> namespace souffle { /** @brief Bidirectional mappping between records and record references */ class RecordMap { /** arity of record */ const size_t arity; /** hash function for unordered record map */ struct RecordHash { std::size_t operator()(std::vector<RamDomain> record) const { std::size_t seed = 0; std::hash<RamDomain> domainHash; for (RamDomain value : record) { seed ^= domainHash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } return seed; } }; /** map from records to references */ // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal std::unordered_map<std::vector<RamDomain>, RamDomain, RecordHash> recordToIndex; /** array of records; index represents record reference */ // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal std::vector<std::vector<RamDomain>> indexToRecord; public: explicit RecordMap(size_t arity) : arity(arity), indexToRecord(1) {} // note: index 0 element left free /** @brief converts record to a record reference */ // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal RamDomain pack(const std::vector<RamDomain>& vector) { RamDomain index; #pragma omp critical(record_pack) { auto pos = 
recordToIndex.find(vector); if (pos != recordToIndex.end()) { index = pos->second; } else { #pragma omp critical(record_unpack) { indexToRecord.push_back(vector); index = indexToRecord.size() - 1; recordToIndex[vector] = index; // assert that new index is smaller than the range assert(index != std::numeric_limits<RamDomain>::max()); } } } return index; } /** @brief convert record pointer to a record reference */ RamDomain pack(const RamDomain* tuple) { // TODO (b-scholz): data is unnecessarily copied // for a successful lookup. To avoid this, we should // compute a hash of the pointer-array and traverse through // the bucket list of the unordered map finding the record. // Note that in case of non-existence, the record still needs to be // copied for the newly created entry but this will be the less // frequent case. std::vector<RamDomain> tmp(arity); for (size_t i = 0; i < arity; i++) { tmp[i] = tuple[i]; } return pack(tmp); } /** @brief convert record reference to a record pointer */ const RamDomain* unpack(RamDomain index) const { const RamDomain* res; #pragma omp critical(record_unpack) res = indexToRecord[index].data(); return res; } }; class RecordTable { public: RecordTable() = default; virtual ~RecordTable() = default; /** @brief convert record to record reference */ RamDomain pack(RamDomain* tuple, size_t arity) { return lookupArity(arity).pack(tuple); } /** @brief convert record reference to a record */ const RamDomain* unpack(RamDomain ref, size_t arity) const { auto iter = maps.find(arity); assert(iter != maps.end() && "Attempting to unpack non-existing record"); return (iter->second).unpack(ref); } private: /** @brief lookup RecordMap for a given arity; if it does not exist, create new RecordMap */ RecordMap& lookupArity(size_t arity) { std::unordered_map<size_t, RecordMap>::iterator mapsIterator; #pragma omp critical(RecordTableGetForArity) { // This will create a new map if it doesn't exist yet. 
mapsIterator = maps.emplace(arity, arity).first; } return mapsIterator->second; } /** Arity/RecordMap association */ std::unordered_map<size_t, RecordMap> maps; }; /** @brief helper to convert tuple to record reference for the synthesiser */ template <std::size_t Arity> inline RamDomain pack(RecordTable& recordTab, Tuple<RamDomain, Arity> tuple) { return recordTab.pack(static_cast<RamDomain*>(tuple.data), Arity); } } // namespace souffle
wino_conv_kernel_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: haoluo@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <math.h>

// NOTE(review): memcpy is used below but <string.h> is not included here —
// presumably it is pulled in via wino_conv_kernel_x86.h; confirm.
#include "wino_conv_kernel_x86.h"

/* Winograd F(4,3): 4x4 output tile computed from a (4+2)x(4+2) input tile. */
#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))
#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))

/* relu: in-place ReLU over `size` floats; a positive `activation` also
 * clamps the result from above (relu6-style cap). */
static void relu(float* data, int size, int activation)
{
    for (int i = 0; i < size; i++)
    {
        data[i] = WINO_MAX(data[i], ( float )0);

        if (activation > 0)
        {
            data[i] = WINO_MIN(data[i], ( float )activation);
        }
    }
}

/* get_private_mem_size: bytes needed to hold the transformed kernel
 * (one 6x6 tile per output-channel/input-channel pair) plus 128 bytes of
 * slack.
 * NOTE(review): the size_t product is truncated into an int — overflows for
 * very large filters; confirm the caller's size assumptions. */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int output_c = filter->dims[0];
    int input_c = filter->dims[1];
    int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128;    // caution
}

/* pad_0_align_2D: copy an m x n plane into an m_align x n_align plane,
 * offset by (pad_h, pad_w).  The destination is assumed pre-zeroed; when no
 * padding is needed the plane is copied wholesale. */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float));
    }
}

// pad 0 in right and down side on 3D: per-channel application of pad_0_align_2D
void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w);
    }
}

/* delete_0_2D: inverse of pad_0_align_2D — extract the m x n region at
 * offset (pad_h, pad_w) out of an m_align x n_align plane. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float));
    }
}

// remove padding on 3D: per-channel application of delete_0_2D
void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
    }
}

/* conv3x3s1_winograd43_sse: 3x3 stride-1 convolution via Winograd F(4,3)
 * (body continues beyond this chunk). */
void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block, float* transform_input,
float* output_bordered, float* _bias, int w, int h, int inch, int outw, int outh, int outch, int num_thread) { size_t elemsize = sizeof(float); const float* bias = _bias; // pad to 4n+2, winograd F(4,3) float* bottom_blob_bordered = bottom_blob; int outw_align = (outw + 3) / 4 * 4; int outh_align = (outh + 3) / 4 * 4; w = outw_align + 2; h = outh_align + 2; // BEGIN transform input float* bottom_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 4 * inch * tiles; bottom_blob_tm = transform_input; // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #if __AVX__ __m256 _1_n = _mm256_set1_ps(-1); __m256 _2_p = _mm256_set1_ps(2); __m256 _2_n = _mm256_set1_ps(-2); __m256 _4_p = _mm256_set1_ps(4); __m256 _4_n = _mm256_set1_ps(-4); __m256 _5_n = _mm256_set1_ps(-5); #endif #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < inch; q++) { const float* img = bottom_blob_bordered + q * w * h; for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; const float* r4 = r3 + w; const float* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { float* 
out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q; float* out_tm1 = out_tm0 + tiles_n; float* out_tm2 = out_tm0 + 2 * tiles_n; float* out_tm3 = out_tm0 + 3 * tiles_n; float* out_tm4 = out_tm0 + 4 * tiles_n; float* out_tm5 = out_tm0 + 5 * tiles_n; float* out_tm6 = out_tm0 + 6 * tiles_n; float* out_tm7 = out_tm0 + 7 * tiles_n; float* out_tm8 = out_tm0 + 8 * tiles_n; #if __AVX__ __m256 _d0, _d1, _d2, _d3, _d4, _d5; __m256 _w0, _w1, _w2, _w3, _w4, _w5; __m256 _t0, _t1, _t2, _t3, _t4, _t5; __m256 _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = _mm256_loadu_ps(r0); _d1 = _mm256_loadu_ps(r1); _d2 = _mm256_loadu_ps(r2); _d3 = _mm256_loadu_ps(r3); _d4 = _mm256_loadu_ps(r4); _d5 = _mm256_loadu_ps(r5); // w = B_t * d _w0 = _mm256_mul_ps(_d0, _4_p); _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0); _w0 = _mm256_add_ps(_w0, _d4); _w1 = _mm256_mul_ps(_d1, _4_n); _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1); _w1 = _mm256_add_ps(_w1, _d3); _w1 = _mm256_add_ps(_w1, _d4); _w2 = _mm256_mul_ps(_d1, _4_p); _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2); _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2); _w2 = _mm256_add_ps(_w2, _d4); _w3 = _mm256_mul_ps(_d1, _2_n); _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3); _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3); _w3 = _mm256_add_ps(_w3, _d4); _w4 = _mm256_mul_ps(_d1, _2_p); _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4); _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4); _w4 = _mm256_add_ps(_w4, _d4); _w5 = _mm256_mul_ps(_d1, _4_p); _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5); _w5 = _mm256_add_ps(_w5, _d5); // transpose d to d_t #ifdef _WIN32 { _t0.m256_f32[0] = _w0.m256_f32[0]; _t1.m256_f32[0] = _w0.m256_f32[1]; _t2.m256_f32[0] = _w0.m256_f32[2]; _t3.m256_f32[0] = _w0.m256_f32[3]; _t4.m256_f32[0] = _w0.m256_f32[4]; _t5.m256_f32[0] = _w0.m256_f32[5]; _t0.m256_f32[1] = _w1.m256_f32[0]; _t1.m256_f32[1] = _w1.m256_f32[1]; _t2.m256_f32[1] = _w1.m256_f32[2]; _t3.m256_f32[1] = _w1.m256_f32[3]; _t4.m256_f32[1] = _w1.m256_f32[4]; _t5.m256_f32[1] = _w1.m256_f32[5]; _t0.m256_f32[2] = _w2.m256_f32[0]; _t1.m256_f32[2] = 
_w2.m256_f32[1]; _t2.m256_f32[2] = _w2.m256_f32[2]; _t3.m256_f32[2] = _w2.m256_f32[3]; _t4.m256_f32[2] = _w2.m256_f32[4]; _t5.m256_f32[2] = _w2.m256_f32[5]; _t0.m256_f32[3] = _w3.m256_f32[0]; _t1.m256_f32[3] = _w3.m256_f32[1]; _t2.m256_f32[3] = _w3.m256_f32[2]; _t3.m256_f32[3] = _w3.m256_f32[3]; _t4.m256_f32[3] = _w3.m256_f32[4]; _t5.m256_f32[3] = _w3.m256_f32[5]; _t0.m256_f32[4] = _w4.m256_f32[0]; _t1.m256_f32[4] = _w4.m256_f32[1]; _t2.m256_f32[4] = _w4.m256_f32[2]; _t3.m256_f32[4] = _w4.m256_f32[3]; _t4.m256_f32[4] = _w4.m256_f32[4]; _t5.m256_f32[4] = _w4.m256_f32[5]; _t0.m256_f32[5] = _w5.m256_f32[0]; _t1.m256_f32[5] = _w5.m256_f32[1]; _t2.m256_f32[5] = _w5.m256_f32[2]; _t3.m256_f32[5] = _w5.m256_f32[3]; _t4.m256_f32[5] = _w5.m256_f32[4]; _t5.m256_f32[5] = _w5.m256_f32[5]; } #else { _t0[0] = _w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = _w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } #endif // d = B_t * d_t _n0 = _mm256_mul_ps(_t0, _4_p); _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0); _n0 = _mm256_add_ps(_n0, _t4); _n1 = _mm256_mul_ps(_t1, _4_n); _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1); _n1 = _mm256_add_ps(_n1, _t3); _n1 = _mm256_add_ps(_n1, _t4); _n2 = _mm256_mul_ps(_t1, _4_p); _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2); _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2); _n2 = _mm256_add_ps(_n2, _t4); _n3 = _mm256_mul_ps(_t1, _2_n); _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3); _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3); _n3 = _mm256_add_ps(_n3, _t4); _n4 = _mm256_mul_ps(_t1, _2_p); _n4 = 
_mm256_fmadd_ps(_t2, _1_n, _n4); _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4); _n4 = _mm256_add_ps(_n4, _t4); _n5 = _mm256_mul_ps(_t1, _4_p); _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5); _n5 = _mm256_add_ps(_n5, _t5); // save to out_tm float output_n0[8] = {0.f}; _mm256_storeu_ps(output_n0, _n0); float output_n1[8] = {0.f}; _mm256_storeu_ps(output_n1, _n1); float output_n2[8] = {0.f}; _mm256_storeu_ps(output_n2, _n2); float output_n3[8] = {0.f}; _mm256_storeu_ps(output_n3, _n3); float output_n4[8] = {0.f}; _mm256_storeu_ps(output_n4, _n4); float output_n5[8] = {0.f}; _mm256_storeu_ps(output_n5, _n5); out_tm0[0] = output_n0[0]; out_tm0[1] = output_n0[1]; out_tm0[2] = output_n0[2]; out_tm0[3] = output_n0[3]; out_tm1[0] = output_n0[4]; out_tm1[1] = output_n0[5]; out_tm1[2] = output_n1[0]; out_tm1[3] = output_n1[1]; out_tm2[0] = output_n1[2]; out_tm2[1] = output_n1[3]; out_tm2[2] = output_n1[4]; out_tm2[3] = output_n1[5]; out_tm3[0] = output_n2[0]; out_tm3[1] = output_n2[1]; out_tm3[2] = output_n2[2]; out_tm3[3] = output_n2[3]; out_tm4[0] = output_n2[4]; out_tm4[1] = output_n2[5]; out_tm4[2] = output_n3[0]; out_tm4[3] = output_n3[1]; out_tm5[0] = output_n3[2]; out_tm5[1] = output_n3[3]; out_tm5[2] = output_n3[4]; out_tm5[3] = output_n3[5]; out_tm6[0] = output_n4[0]; out_tm6[1] = output_n4[1]; out_tm6[2] = output_n4[2]; out_tm6[3] = output_n4[3]; out_tm7[0] = output_n4[4]; out_tm7[1] = output_n4[5]; out_tm7[2] = output_n5[0]; out_tm7[3] = output_n5[1]; out_tm8[0] = output_n5[2]; out_tm8[1] = output_n5[3]; out_tm8[2] = output_n5[4]; out_tm8[3] = output_n5[5]; #else float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 
4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __AVX__ r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } // BEGIN dot float* top_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; 
// may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 36 * tiles; top_blob_tm = dot_block; #pragma omp parallel for num_threads(num_thread) for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp << 3; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); float* output4_tm = top_blob_tm + tiles_n * (p + 4); float* output5_tm = top_blob_tm + tiles_n * (p + 5); float* output6_tm = top_blob_tm + tiles_n * (p + 6); float* output7_tm = top_blob_tm + tiles_n * (p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); __m128 _sum4 = _mm_broadcast_ss(&zero_val); __m128 _sum5 = _mm_broadcast_ss(&zero_val); __m128 _sum6 = _mm_broadcast_ss(&zero_val); __m128 _sum7 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); __m128 _sum4 = _mm_set1_ps(0.f); __m128 _sum5 = _mm_set1_ps(0.f); __m128 _sum6 = _mm_set1_ps(0.f); __m128 _sum7 = _mm_set1_ps(0.f); #endif int q = 0; for (; q + 3 < inch; q = q + 4) { 
__m128 _r0 = _mm_loadu_ps(r0); __m128 _r1 = _mm_loadu_ps(r0 + 4); __m128 _r2 = _mm_loadu_ps(r0 + 8); __m128 _r3 = _mm_loadu_ps(r0 + 12); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4)); _sum5 = 
_mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, 
_k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7)); #endif kptr += 32; r0 += 16; } for (; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); _mm_storeu_ps(output4_tm, _sum4); _mm_storeu_ps(output5_tm, _sum5); _mm_storeu_ps(output6_tm, _sum6); _mm_storeu_ps(output7_tm, _sum7); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; sum4[n] += r0[n] * kptr[n + 16]; sum5[n] += r0[n] * kptr[n + 20]; sum6[n] += r0[n] * kptr[n + 24]; sum7[n] += r0[n] * kptr[n 
+ 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = 
_mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm + 36 * tiles * p; output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); #endif kptr += 4; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); #else float sum0[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __AVX__ || __SSE__ output0_tm += 36; } } } } // END dot // BEGIN transform output float* top_blob_bordered = NULL; if (outw_align == outw && outh_align == outh) { top_blob_bordered = top_blob; } else { top_blob_bordered = output_bordered; } { // AT // const float 
itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < outch; p++) { float* out_tile = top_blob_tm + 36 * tiles * p; float* outRow0 = top_blob_bordered + outw_align * outh_align * p; float* outRow1 = outRow0 + outw_align; float* outRow2 = outRow0 + outw_align * 2; float* outRow3 = outRow0 + outw_align * 3; const float bias0 = bias ? bias[p] : 0.f; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { // TODO AVX2 float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; float w0[6], w1[6], w2[6], w3[6]; float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; float o0[4], o1[4], o2[4], o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 
0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] + bias0; outRow1[n] = o1[n] + bias0; outRow2[n] = o2[n] + bias0; outRow3[n] = o3[n] + bias0; } out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw_align * 3; outRow1 += outw_align * 3; outRow2 += outw_align * 3; outRow3 += outw_align * 3; } } } // END transform output if (outw_align != outw || outh_align != outw) { delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0); } } void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch) { float* kernel_tm = ( float* )sys_malloc(6 * 6 * inch * outch * sizeof(float)); // G const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}}; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3] = {0}; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } float* kernel_tm_test = kernel_wino; for (int r = 0; r < 9; r++) { int p = 0; for (; p 
+ 7 < outch; p += 8) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36; const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36; const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36; const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36; float* ktmp = kernel_tm_test + p / 8 * inch * 32; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp[16] = kernel4[r * 4 + 0]; ktmp[17] = kernel4[r * 4 + 1]; ktmp[18] = kernel4[r * 4 + 2]; ktmp[19] = kernel4[r * 4 + 3]; ktmp[20] = kernel5[r * 4 + 0]; ktmp[21] = kernel5[r * 4 + 1]; ktmp[22] = kernel5[r * 4 + 2]; ktmp[23] = kernel5[r * 4 + 3]; ktmp[24] = kernel6[r * 4 + 0]; ktmp[25] = kernel6[r * 4 + 1]; ktmp[26] = kernel6[r * 4 + 2]; ktmp[27] = kernel6[r * 4 + 3]; ktmp[28] = kernel7[r * 4 + 0]; ktmp[29] = kernel7[r * 4 + 1]; ktmp[30] = kernel7[r * 4 + 2]; ktmp[31] = kernel7[r * 4 + 3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p + 3 < outch; p += 4) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const 
float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p < outch; p++) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 36; } } kernel_tm_test += 4 * inch * outch; } free(kernel_tm); } int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int batch = input_tensor->dims[0]; int input_c = input_tensor->dims[1]; int input_h = input_tensor->dims[2]; int input_w = input_tensor->dims[3]; int output_c = output_tensor->dims[1]; int output_h = output_tensor->dims[2]; int output_w = output_tensor->dims[3]; int pad_h = param->pad_h0; int pad_w = param->pad_w0; float* kernel = ( float* )filter_tensor->data; if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor, param); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } int block_h = (output_h + TILE - 1) / TILE; int 
block_w = (output_w + TILE - 1) / TILE; int block = block_h * block_w; int padded_inh = TILE * block_h + 2; int padded_inw = TILE * block_w + 2; int pad_inhw = padded_inh * padded_inw; int outw = block_w * TILE; int outh = block_h * TILE; priv_info->input_pad = ( float* )sys_malloc(batch * input_c * pad_inhw * sizeof(float)); memset(priv_info->input_pad, 0, batch * input_c * pad_inhw * sizeof(float)); priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * block * output_c * sizeof(float)); priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * block * input_c * sizeof(float)); priv_info->output_bordered = NULL; if (outw != output_w || outh != output_h) { priv_info->output_bordered = ( float* )sys_malloc(outw * outh * output_c * sizeof(float)); } conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c); return 0; } int wino_conv_hcl_postrun(struct conv_priv_info* priv_info) { if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } if (priv_info->input_pad) { sys_free(priv_info->input_pad); priv_info->input_pad = NULL; } if (priv_info->dot_block) { sys_free(priv_info->dot_block); priv_info->dot_block = NULL; } if (priv_info->transform_input) { sys_free(priv_info->transform_input); priv_info->transform_input = NULL; } if (priv_info->output_bordered) { sys_free(priv_info->output_bordered); priv_info->output_bordered = NULL; } return 0; } int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { /* param */ int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = 
param->pad_h0; int pad_w0 = param->pad_w0; int act_type = param->activation; int group = param->group; int batch = input_tensor->dims[0]; int in_c = input_tensor->dims[1]; int in_c_g = input_tensor->dims[1] / group; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; int input_size = in_c * in_h * in_w; int input_size_g = in_c_g * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = output_tensor->dims[1]; int out_h = output_tensor->dims[2]; int out_w = output_tensor->dims[3]; int out_hw = out_h * out_w; int output_size = out_c * out_h * out_w; int out_c_align = ((out_c + 3) & -4); /* wino param */ int block_h = (out_h + TILE - 1) / TILE; int block_w = (out_w + TILE - 1) / TILE; int block_hw = block_h * block_w; int padded_in_h = block_h * TILE + 2; int padded_in_w = block_w * TILE + 2; int padded_in_hw = padded_in_h * padded_in_w; /* buffer addr */ float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* biases = NULL; if (bias_tensor != NULL) biases = ( float* )bias_tensor->data; for (int i = 0; i < batch; i++) { for (int g = 0; g < group; g++) { pad_0_align_3D(priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w, in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0); conv3x3s1_winograd43_sse(priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g, output + i * out_c * out_h * out_w, priv_info->interleave_buffer, priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered, biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread); } } if (act_type >= 0) { relu(output, batch * output_size, act_type); } return 0; }
linearFiltering-laplacian.c
/*************************************************************************
 * linearFiltering-laplacian.c -
 *
 * $Id$
 *
 * Copyright (c) INRIA 2012, all rights reserved
 *
 * AUTHOR:
 * Gregoire Malandain (gregoire.malandain@inria.fr)
 *
 * CREATION DATE:
 * Wed Dec 26 22:19:17 CET 2012
 *
 * ADDITIONS, CHANGES
 *
 */

/* WARNING, this file is not aimed to be compiled on its own:
 * it is included from linearFiltering.c
 */



/* Compute the 2D Laplacian of bufferIn into bufferOut:
 *   L = d^2/dx^2 + d^2/dy^2
 * Each second derivative is obtained with a separable recursive
 * filtering pass (the non-derived direction is smoothed, the Z
 * direction is left untouched), the two passes are then summed.
 *
 * bufferIn / typeIn   : input image and its voxel type
 * bufferOut / typeOut : output image and its voxel type
 * bufferDims          : image dimensions {dimx, dimy, dimz}
 * borderLengths       : border handling lengths per direction
 * theFilter           : filtering coefficients (one per direction)
 *
 * Returns 1 on success, -1 on failure (allocation or filtering error).
 */
int laplacian2D( void *bufferIn, bufferType typeIn,
                 void *bufferOut, bufferType typeOut,
                 int *bufferDims, int *borderLengths,
                 typeFilteringCoefficients *theFilter )
{
  char *proc = "laplacian2D";
  size_t nx, ny, nz;
  size_t nVoxels;
  size_t allocLen;
  int needTwoBuffers;
  typeFilteringCoefficients localFilter[3];
  float *workspace = NULL;
  float *derivBuf = NULL;   /* receives the Y second derivative */
  float *accBuf = NULL;     /* accumulates the laplacian */
  long int idx;

  nx = bufferDims[0];
  ny = bufferDims[1];
  nz = bufferDims[2];
  nVoxels = nx * ny * nz;

  /* A second float buffer is needed when the laplacian cannot be
   * accumulated directly into bufferOut: either the output type is
   * not float, or the call is in-place (bufferIn == bufferOut) and
   * bufferIn must not be overwritten before the second pass.
   */
  needTwoBuffers = ( typeOut != FLOAT || bufferIn == bufferOut );
  allocLen = needTwoBuffers ? 2 * nVoxels : nVoxels;

  workspace = (float*)vtmalloc( allocLen * sizeof(float), "auxBuf", proc );
  if ( workspace == NULL ) {
    if ( _verbose_ > 0 )
      fprintf( stderr, "%s: unable to allocate auxiliary buffer\n", proc );
    return( -1 );
  }

  derivBuf = workspace;
  accBuf = needTwoBuffers ? ( workspace + nVoxels ) : (float*)bufferOut;

  /* local copy of the coefficients: only the derivative orders differ
   * between the passes below
   */
  localFilter[0] = theFilter[0];
  localFilter[1] = theFilter[1];
  localFilter[2] = theFilter[2];

  /* pass 1: d^2/dx^2 (smoothing along Y, nothing along Z) -> accBuf */
  localFilter[0].derivative = DERIVATIVE_2;
  localFilter[1].derivative = DERIVATIVE_0;
  localFilter[2].derivative = NODERIVATIVE;
  if ( separableLinearFiltering( bufferIn, typeIn, (void*)accBuf, FLOAT,
                                 bufferDims, borderLengths, localFilter ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: unable to compute X derivative (2D)\n", proc );
    vtfree( workspace );
    return( -1 );
  }

  /* pass 2: d^2/dy^2 (smoothing along X, nothing along Z) -> derivBuf */
  localFilter[0].derivative = DERIVATIVE_0;
  localFilter[1].derivative = DERIVATIVE_2;
  localFilter[2].derivative = NODERIVATIVE;
  if ( separableLinearFiltering( bufferIn, typeIn, (void*)derivBuf, FLOAT,
                                 bufferDims, borderLengths, localFilter ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: unable to compute Y derivative (2D)\n", proc );
    vtfree( workspace );
    return( -1 );
  }

  /* laplacian = Lxx + Lyy */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for ( idx = 0; idx < (long int)nVoxels; idx++ )
    accBuf[idx] += derivBuf[idx];

  /* convert (or copy) into the caller's buffer when the accumulation
   * was done in the workspace
   */
  if ( (void*)accBuf != bufferOut ) {
    if ( ConvertBuffer( accBuf, FLOAT, bufferOut, typeOut, nVoxels ) != 1 ) {
      if ( _verbose_ )
        fprintf( stderr, "%s: unable to convert buffer\n", proc );
      vtfree( workspace );
      return( -1 );
    }
  }

  vtfree( workspace );
  return( 1 );
}



/* Compute the 3D Laplacian of bufferIn into bufferOut:
 *   L = d^2/dx^2 + d^2/dy^2 + d^2/dz^2
 * The X and Y second derivatives share one smoothing pass along Z;
 * the Z second derivative is computed directly from the input.
 * The three contributions are summed into the accumulation buffer.
 *
 * Parameters and return value: see laplacian2D.
 */
int laplacian3D( void *bufferIn, bufferType typeIn,
                 void *bufferOut, bufferType typeOut,
                 int *bufferDims, int *borderLengths,
                 typeFilteringCoefficients *theFilter )
{
  char *proc = "laplacian3D";
  size_t nx, ny, nz;
  size_t nVoxels;
  size_t allocLen;
  int needTwoBuffers;
  typeFilteringCoefficients localFilter[3];
  float *workspace = NULL;
  float *derivBuf = NULL;   /* scratch buffer for intermediate passes */
  float *accBuf = NULL;     /* accumulates the laplacian */
  long int idx;

  nx = bufferDims[0];
  ny = bufferDims[1];
  nz = bufferDims[2];
  nVoxels = nx * ny * nz;

  /* same buffering policy as in the 2D case */
  needTwoBuffers = ( typeOut != FLOAT || bufferIn == bufferOut );
  allocLen = needTwoBuffers ? 2 * nVoxels : nVoxels;

  workspace = (float*)vtmalloc( allocLen * sizeof(float), "auxBuf", proc );
  if ( workspace == NULL ) {
    if ( _verbose_ > 0 )
      fprintf( stderr, "%s: unable to allocate auxiliary buffer\n", proc );
    return( -1 );
  }

  derivBuf = workspace;
  accBuf = needTwoBuffers ? ( workspace + nVoxels ) : (float*)bufferOut;

  localFilter[0] = theFilter[0];
  localFilter[1] = theFilter[1];
  localFilter[2] = theFilter[2];

  /* pass 1: smoothing along Z only -> derivBuf
   * (shared by the X and Y second-derivative passes below)
   */
  localFilter[0].derivative = NODERIVATIVE;
  localFilter[1].derivative = NODERIVATIVE;
  localFilter[2].derivative = DERIVATIVE_0;
  if ( separableLinearFiltering( bufferIn, typeIn, (void*)derivBuf, FLOAT,
                                 bufferDims, borderLengths, localFilter ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: unable to compute Z smoothing (3D)\n", proc );
    vtfree( workspace );
    return( -1 );
  }

  /* pass 2: d^2/dx^2 of the z-smoothed image -> accBuf */
  localFilter[0].derivative = DERIVATIVE_2;
  localFilter[1].derivative = DERIVATIVE_0;
  localFilter[2].derivative = NODERIVATIVE;
  if ( separableLinearFiltering( (void*)derivBuf, FLOAT, (void*)accBuf, FLOAT,
                                 bufferDims, borderLengths, localFilter ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: unable to compute X derivative (3D)\n", proc );
    vtfree( workspace );
    return( -1 );
  }

  /* pass 3: d^2/dy^2 of the z-smoothed image, computed in place */
  localFilter[0].derivative = DERIVATIVE_0;
  localFilter[1].derivative = DERIVATIVE_2;
  localFilter[2].derivative = NODERIVATIVE;
  if ( separableLinearFiltering( (void*)derivBuf, FLOAT, (void*)derivBuf, FLOAT,
                                 bufferDims, borderLengths, localFilter ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: unable to compute Y derivative (3D)\n", proc );
    vtfree( workspace );
    return( -1 );
  }

  /* accumulate Lyy on top of Lxx */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for ( idx = 0; idx < (long int)nVoxels; idx++ )
    accBuf[idx] += derivBuf[idx];

  /* pass 4: d^2/dz^2, computed from the original input -> derivBuf */
  localFilter[0].derivative = DERIVATIVE_0;
  localFilter[1].derivative = DERIVATIVE_0;
  localFilter[2].derivative = DERIVATIVE_2;
  if ( separableLinearFiltering( bufferIn, typeIn, (void*)derivBuf, FLOAT,
                                 bufferDims, borderLengths, localFilter ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: unable to compute Z derivative (3D)\n", proc );
    vtfree( workspace );
    return( -1 );
  }

  /* accumulate Lzz */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for ( idx = 0; idx < (long int)nVoxels; idx++ )
    accBuf[idx] += derivBuf[idx];

  /* convert (or copy) into the caller's buffer when needed */
  if ( (void*)accBuf != bufferOut ) {
    if ( ConvertBuffer( accBuf, FLOAT, bufferOut, typeOut, nVoxels ) != 1 ) {
      if ( _verbose_ )
        fprintf( stderr, "%s: unable to convert buffer\n", proc );
      vtfree( workspace );
      return( -1 );
    }
  }

  vtfree( workspace );
  return( 1 );
}



/* Laplacian dispatcher: a single-slice image (dimz == 1) is handled
 * by the 2D implementation, anything else by the 3D one.
 */
int laplacian( void *bufferIn, bufferType typeIn,
               void *bufferOut, bufferType typeOut,
               int *bufferDims, int *borderLengths,
               typeFilteringCoefficients *theFilter )
{
  return ( bufferDims[2] == 1 )
    ? laplacian2D( bufferIn, typeIn, bufferOut, typeOut,
                   bufferDims, borderLengths, theFilter )
    : laplacian3D( bufferIn, typeIn, bufferOut, typeOut,
                   bufferDims, borderLengths, theFilter );
}
GB_binop__div_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__div_int16 // A.*B function (eWiseMult): GB_AemultB__div_int16 // A*D function (colscale): GB_AxD__div_int16 // D*A function (rowscale): GB_DxB__div_int16 // C+=B function (dense accum): GB_Cdense_accumB__div_int16 // C+=b function (dense accum): GB_Cdense_accumb__div_int16 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_int16 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_int16 // C=scalar+B GB_bind1st__div_int16 // C=scalar+B' GB_bind1st_tran__div_int16 // C=A+scalar GB_bind2nd__div_int16 // C=A'+scalar GB_bind2nd_tran__div_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 16) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ 
int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IDIV_SIGNED (x, y, 16) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_INT16 || GxB_NO_DIV_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B where C, A, and B are all dense.
// NOTE(review): unlike its siblings this kernel returns void and has no
// GB_DISABLE guard — presumably callers check GB_DISABLE before dispatching
// here; confirm against the GraphBLAS dispatch code.
void GB_Cdense_ewise3_accum__div_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__div_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__div_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // slicing of B's entries into tasks (produced by GB_ek_slice)
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__div_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // scalar b, passed as an untyped pointer
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (kept by the code generator)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__div_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__div_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the slice workspaces declared in the functions below; invoked by the
// templates on both success and error paths.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__div_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces allocated (if needed) and freed by the template via GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__div_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__div_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B, or NULL (GBB treats NULL as all-present)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = GB_IDIV_SIGNED (x, bij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__div_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A, or NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = GB_IDIV_SIGNED (aij, y, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int16_t aij = Ax [pA] ;                         \
    Cx [pC] = GB_IDIV_SIGNED (x, aij, 16) ;         \
}

GrB_Info GB_bind1st_tran__div_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int16_t aij = Ax [pA] ;                         \
    Cx [pC] = GB_IDIV_SIGNED (aij, y, 16) ;         \
}

GrB_Info GB_bind2nd_tran__div_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pooling_2x2_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Max pooling with a 2x2 kernel and stride 2 for the pack-4 layout (each
// logical element is 4 consecutive floats).  Every output element is the
// lane-wise max of a 2x2 window taken from two adjacent input rows (r0, r1).
static void pooling2x2s2_max_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Floats to skip from the end of one consumed row pair to the start of
    // the next: within a row, r0/r1 advance by 2*outw elements, so
    // (w - 2*outw + w) elements * 4 floats lands on the row two below.
    const int tailstep = (w - 2*outw + w) * 4;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q=0; q<inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // r0/r1 walk the two input rows that feed one output row.
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;

            // Unrolled x4: each asm block consumes 8 pack-4 elements from r0
            // and 8 from r1 (post-incrementing the pointers) and stores 4
            // pack-4 output elements.
            for (; j+3<outw; j+=4)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
                    "fmax v0.4s, v0.4s, v1.4s \n"
                    "fmax v2.4s, v2.4s, v3.4s \n"

                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
                    "fmax v4.4s, v4.4s, v5.4s \n"
                    "fmax v6.4s, v6.4s, v7.4s \n"

                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n"
                    "fmax v16.4s, v16.4s, v17.4s \n"
                    "fmax v18.4s, v18.4s, v19.4s \n"

                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
                    "fmax v20.4s, v20.4s, v21.4s \n"
                    "fmax v22.4s, v22.4s, v23.4s \n"

                    // combine the row-0 and row-1 horizontal maxima
                    "fmax v0.4s, v0.4s, v16.4s \n"
                    "fmax v1.4s, v2.4s, v18.4s \n"
                    "fmax v2.4s, v4.4s, v20.4s \n"
                    "fmax v3.4s, v6.4s, v22.4s \n"

                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    : "=r"(outptr), // %0
                      "=r"(r0),     // %1
                      "=r"(r1)      // %2
                    : "0"(outptr),
                      "1"(r0),
                      "2"(r1)
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
                );
#else // __aarch64__
                asm volatile(
                    "pld [%1, #512] \n"
                    "vldm %1!, {d0-d7} \n"
                    "vmax.f32 q0, q0, q1 \n"
                    "vmax.f32 q2, q2, q3 \n"

                    "pld [%1, #512] \n"
                    "vldm %1!, {d8-d15} \n"
                    "vmax.f32 q4, q4, q5 \n"
                    "vmax.f32 q6, q6, q7 \n"

                    "pld [%2, #512] \n"
                    "vldm %2!, {d16-d23} \n"
                    "vmax.f32 q8, q8, q9 \n"
                    "vmax.f32 q10, q10, q11 \n"

                    "pld [%2, #512] \n"
                    "vldm %2!, {d24-d31} \n"
                    "vmax.f32 q12, q12, q13 \n"
                    "vmax.f32 q14, q14, q15 \n"

                    // combine the row-0 and row-1 horizontal maxima
                    "vmax.f32 q0, q0, q8 \n"
                    "vmax.f32 q1, q2, q10 \n"
                    "vmax.f32 q2, q4, q12 \n"
                    "vmax.f32 q3, q6, q14 \n"

                    "vstm %0!, {d0-d7} \n"
                    : "=r"(outptr), // %0
                      "=r"(r0),     // %1
                      "=r"(r1)      // %2
                    : "0"(outptr),
                      "1"(r0),
                      "2"(r1)
                    : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
#endif // __aarch64__
            }

            // scalar tail: one pack-4 output element per iteration
            for (; j<outw; j++)
            {
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0+4);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1+4);

                float32x4_t _max0 = vmaxq_f32(_r00, _r01);
                float32x4_t _max1 = vmaxq_f32(_r10, _r11);
                float32x4_t _max = vmaxq_f32(_max0, _max1);

                vst1q_f32(outptr, _max);

                r0 += 8;
                r1 += 8;
                outptr += 4;
            }

            // jump to the next input row pair
            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
YAKL_reductions.h
#pragma once
// Included by YAKL.h

namespace yakl {

  template <class T, int memSpace, int RED> class ParallelReduction;

  // Tags selecting which reduction a ParallelReduction instantiation performs.
  int constexpr YAKL_REDUCTION_MIN  = 0;
  int constexpr YAKL_REDUCTION_MAX  = 1;
  int constexpr YAKL_REDUCTION_SUM  = 2;
  int constexpr YAKL_REDUCTION_PROD = 3;

  // It is highly recommended that the user use yakl::intrinsics::minval,
  // yakl::intrinsics::maxval, and yakl::intrinsics::sum instead of the
  // classes in this file.
  // These are parallel reduction classes for host and device.
  // They handle all memory allocation and deallocation internally to reduce
  // the chance of memory errors.
  // setup and constructor take an integer of the number of items needing to
  // be reduced.
  // operator() will perform a reduction on the device and return the result
  // as a scalar value after copying to the host.
  // deviceReduce will perform a reduction on the device and leave the result
  // on the device as a pointer.
  // The array is passed to the device as a pointer, so it's not completely
  // safe if the user makes an error where the requested nItems differs from
  // the actual number of items in the array.

  // Host specialization: plain serial loops over nItems elements.
  // NOTE(review): operator() reads data[0] unconditionally, so nItems must
  // be >= 1 and setup()/the sizing constructor must have been called.
  template <class T, int RED> class ParallelReduction<T,memHost,RED> {
    int nItems; // Number of items in the array that will be reduced
  public:
    ParallelReduction() {}
    ParallelReduction(int const nItems) { this->nItems = nItems; }
    ~ParallelReduction() {}
    void setup(int nItems) { this->nItems = nItems; }
    // Reduce nItems elements of data and return the scalar result.
    T operator() (T *data) {
      T rslt = data[0];
      if constexpr (RED == YAKL_REDUCTION_MIN) {
        for (int i=1; i<nItems; i++) { rslt = data[i] < rslt ? data[i] : rslt; }
      } else if constexpr (RED == YAKL_REDUCTION_MAX) {
        for (int i=1; i<nItems; i++) { rslt = data[i] > rslt ? data[i] : rslt; }
      } else if constexpr (RED == YAKL_REDUCTION_SUM) {
        for (int i=1; i<nItems; i++) { rslt += data[i]; }
      } else if constexpr (RED == YAKL_REDUCTION_PROD) {
        for (int i=1; i<nItems; i++) { rslt *= data[i]; }
      }
      return rslt;
    }
  };

#ifdef YAKL_ARCH_HIP

  // Device specialization for HIP, built on hipCUB's DeviceReduce.
  // setup() performs the CUB sizing call (NULL temp pointer) once and
  // allocates both the device result and the temporary storage; operator()
  // reuses them for every reduction of the same size.
  // NOTE(review): the implicitly-generated copy operations would make two
  // objects free the same tmp/rsltP in finalize(); avoid copying instances.
  template <class T, int RED> class ParallelReduction<T,memDevice,RED> {
    void   *tmp;    // Temporary storage
    size_t nTmp;    // Size of temporary storage
    int    nItems;  // Number of items in the array that will be reduced
    T      *rsltP;  // Device pointer for reduction result
  public:
    ParallelReduction() { tmp = NULL; }
    ParallelReduction(int const nItems) { tmp = NULL; setup(nItems); }
    ~ParallelReduction() { finalize(); }
    void setup(int const nItems) {
      #ifdef YAKL_AUTO_PROFILE
        timer_start("YAKL_internal_reduction_setup");
      #endif
      finalize();
      rsltP = (T *) yaklAllocDevice(sizeof(T),"Parallel Reduction Result"); // Allocate device pointer for result
      // Get the amount of temporary storage needed (call with NULL storage pointer)
      if constexpr (RED == YAKL_REDUCTION_MIN) {
        hipcub::DeviceReduce::Min(tmp, nTmp, rsltP , rsltP , nItems );
      } else if constexpr (RED == YAKL_REDUCTION_MAX) {
        hipcub::DeviceReduce::Max(tmp, nTmp, rsltP , rsltP , nItems );
      } else if constexpr (RED == YAKL_REDUCTION_SUM) {
        hipcub::DeviceReduce::Sum(tmp, nTmp, rsltP , rsltP , nItems );
      } else if constexpr (RED == YAKL_REDUCTION_PROD) {
        hipcub::DeviceReduce::Reduce(tmp, nTmp, rsltP, rsltP, nItems, YAKL_LAMBDA (T a,T b)->T {return a*b;} , (T) 1 );
      }
      tmp = yaklAllocDevice(nTmp,"Parallel Reduction Temporary");  // Allocate temporary storage
      this->nItems = nItems;
      #ifdef YAKL_AUTO_PROFILE
        timer_stop("YAKL_internal_reduction_setup");
      #endif
    }
    void finalize() {
      // tmp is only non-NULL after a completed setup(), so it guards both frees
      if (tmp != NULL) {
        yaklFreeDevice(rsltP,"Parallel Reduction Result");
        yaklFreeDevice(tmp,"Parallel Reduction Temporary");
      }
      tmp = NULL;
    }
    // Reduce nItems device elements of data; blocks until the scalar result
    // has been copied back to the host.
    T operator() (T *data) {
      #ifdef YAKL_AUTO_PROFILE
        timer_start("YAKL_internal_reduction_apply");
      #endif
      T rslt;
      if constexpr (RED == YAKL_REDUCTION_MIN) {
        hipcub::DeviceReduce::Min(tmp, nTmp, data , rsltP , nItems , 0 );  // Compute the reduction
      } else if constexpr (RED == YAKL_REDUCTION_MAX) {
        hipcub::DeviceReduce::Max(tmp, nTmp, data , rsltP , nItems , 0 );  // Compute the reduction
      } else if constexpr (RED == YAKL_REDUCTION_SUM) {
        hipcub::DeviceReduce::Sum(tmp, nTmp, data , rsltP , nItems , 0 );  // Compute the reduction
      } else if constexpr (RED == YAKL_REDUCTION_PROD) {
        hipcub::DeviceReduce::Reduce(tmp, nTmp, data, rsltP, nItems, YAKL_LAMBDA (T a,T b)->T {return a*b;} , (T) 1, 0 );
      }
      memcpy_device_to_host(&rslt , rsltP , 1 );  // copy the scalar result to the host
      check_last_error();
      fence();
      #ifdef YAKL_AUTO_PROFILE
        timer_stop("YAKL_internal_reduction_apply");
      #endif
      return rslt;
    }
  };

#elif defined(YAKL_ARCH_CUDA)

  // Device specialization for CUDA, built on CUB's DeviceReduce.
  // Same structure as the HIP specialization above; see its notes
  // (copying instances risks a double free in finalize()).
  template <class T, int RED> class ParallelReduction<T,memDevice,RED> {
    void   *tmp;    // Temporary storage
    size_t nTmp;    // Size of temporary storage
    int    nItems;  // Number of items in the array that will be reduced
    T      *rsltP;  // Device pointer for reduction result
  public:
    ParallelReduction() { tmp = NULL; }
    ParallelReduction(int const nItems) { tmp = NULL; setup(nItems); }
    ~ParallelReduction() { finalize(); }
    void setup(int const nItems) {
      #ifdef YAKL_AUTO_PROFILE
        timer_start("YAKL_internal_reduction_setup");
      #endif
      finalize();
      rsltP = (T *) yaklAllocDevice(sizeof(T),"Parallel Reduction Result"); // Allocate device pointer for result
      // Get the amount of temporary storage needed (call with NULL storage pointer)
      if constexpr (RED == YAKL_REDUCTION_MIN) {
        cub::DeviceReduce::Min(tmp, nTmp, rsltP , rsltP , nItems );
      } else if constexpr (RED == YAKL_REDUCTION_MAX) {
        cub::DeviceReduce::Max(tmp, nTmp, rsltP , rsltP , nItems );
      } else if constexpr (RED == YAKL_REDUCTION_SUM) {
        cub::DeviceReduce::Sum(tmp, nTmp, rsltP , rsltP , nItems );
      } else if constexpr (RED == YAKL_REDUCTION_PROD) {
        cub::DeviceReduce::Reduce(tmp, nTmp, rsltP, rsltP, nItems, YAKL_LAMBDA (T a,T b)->T {return a*b;} , (T) 1 );
      }
      tmp = yaklAllocDevice(nTmp,"Parallel Reduction Temporary");  // Allocate temporary storage
      this->nItems = nItems;
      #ifdef YAKL_AUTO_PROFILE
        timer_stop("YAKL_internal_reduction_setup");
      #endif
    }
    void finalize() {
      if (tmp != NULL) {
        yaklFreeDevice(rsltP,"Parallel Reduction Result");
        yaklFreeDevice(tmp,"Parallel Reduction Temporary");
      }
      tmp = NULL;
    }
    // Reduce nItems device elements of data; blocks until the scalar result
    // has been copied back to the host.
    T operator() (T *data) {
      #ifdef YAKL_AUTO_PROFILE
        timer_start("YAKL_internal_reduction_apply");
      #endif
      T rslt;
      if constexpr (RED == YAKL_REDUCTION_MIN) {
        cub::DeviceReduce::Min(tmp, nTmp, data , rsltP , nItems , 0 );  // Compute the reduction
      } else if constexpr (RED == YAKL_REDUCTION_MAX) {
        cub::DeviceReduce::Max(tmp, nTmp, data , rsltP , nItems , 0 );  // Compute the reduction
      } else if constexpr (RED == YAKL_REDUCTION_SUM) {
        cub::DeviceReduce::Sum(tmp, nTmp, data , rsltP , nItems , 0 );  // Compute the reduction
      } else if constexpr (RED == YAKL_REDUCTION_PROD) {
        cub::DeviceReduce::Reduce(tmp, nTmp, data ,rsltP, nItems, YAKL_LAMBDA (T a,T b)->T {return a*b;} , (T) 1, 0 );
      }
      memcpy_device_to_host(&rslt , rsltP , 1 );  // copy the scalar result to the host
      check_last_error();
      fence();
      #ifdef YAKL_AUTO_PROFILE
        timer_stop("YAKL_internal_reduction_apply");
      #endif
      return rslt;
    }
  };

#elif defined(YAKL_ARCH_SYCL)

  // Device specialization for SYCL: uses sycl::reduction kernels writing
  // into a device-allocated scalar (rsltP); no temporary storage is needed.
  template <class T, int RED> class ParallelReduction<T,memDevice,RED> {
    int nItems;  // Number of items in the array that will be reduced
    T   *rsltP;  // Device pointer for reduction result
  public:
    ParallelReduction() { rsltP = nullptr; }
    ParallelReduction(int const nItems) { rsltP = nullptr; setup(nItems); }
    ~ParallelReduction() { finalize(); }
    void setup(int const nItems) {
      #ifdef YAKL_AUTO_PROFILE
        timer_start("YAKL_internal_reduction_setup");
      #endif
      finalize();
      rsltP = (T *) yaklAllocDevice(sizeof(T),"Parallel Reduction Result"); // Allocate device pointer for result
      this->nItems = nItems;
      #ifdef YAKL_AUTO_PROFILE
        timer_stop("YAKL_internal_reduction_setup");
      #endif
    }
    void finalize() {
      if(rsltP != nullptr) {
        yaklFreeDevice(rsltP,"Parallel Reduction Result");
      }
      rsltP = nullptr;
    }
    // Reduce nItems device elements of data; blocks (fence) until the scalar
    // result has been copied back to the host.
    T operator() (T *data) {
      #ifdef YAKL_AUTO_PROFILE
        timer_start("YAKL_internal_reduction_apply");
      #endif
      T rslt=0;
      sycl_default_stream().submit([&, nItems = this->nItems](sycl::handler &cgh) {
        if constexpr (RED == YAKL_REDUCTION_MIN) {
          cgh.parallel_for(sycl::range<1>(nItems),
                           sycl::reduction(rsltP, sycl::minimum<>(), sycl::property::reduction::initialize_to_identity{}),
                           [=] (sycl::id<1> idx, auto& min) { min.combine(data[idx]); });
        } else if constexpr (RED == YAKL_REDUCTION_MAX) {
          cgh.parallel_for(sycl::range<1>(nItems),
                           sycl::reduction(rsltP, sycl::maximum<>(), sycl::property::reduction::initialize_to_identity{}),
                           [=] (sycl::id<1> idx, auto& max) { max.combine(data[idx]); });
        } else if constexpr (RED == YAKL_REDUCTION_SUM) {
          cgh.parallel_for(sycl::range<1>(nItems),
                           sycl::reduction(rsltP, std::plus<>(), sycl::property::reduction::initialize_to_identity{}),
                           [=] (sycl::id<1> idx, auto& sum) { sum.combine(data[idx]); });
        } else if constexpr (RED == YAKL_REDUCTION_PROD) {
          cgh.parallel_for(sycl::range<1>(nItems),
                           sycl::reduction(rsltP, std::multiplies<>(), sycl::property::reduction::initialize_to_identity{}),
                           [=] (sycl::id<1> idx, auto& prod) { prod.combine(data[idx]); });
        }
      });
      memcpy_device_to_host(&rslt , rsltP , 1 );
      fence();
      #ifdef YAKL_AUTO_PROFILE
        timer_stop("YAKL_internal_reduction_apply");
      #endif
      return rslt;
    }
  };

#elif defined(YAKL_ARCH_OPENMP)

  // "Device" specialization for the OpenMP backend: host loops with OpenMP
  // reduction clauses.  Same nItems >= 1 precondition as the host class.
  template <class T, int RED> class ParallelReduction<T,memDevice,RED> {
    int nItems; // Number of items in the array that will be reduced
  public:
    ParallelReduction() {}
    ParallelReduction(int const nItems) { this->nItems = nItems; }
    ~ParallelReduction() {}
    void setup(int nItems) { this->nItems = nItems; }
    T operator() (T *data) {
      #ifdef YAKL_AUTO_PROFILE
        timer_start("YAKL_internal_reduction_apply");
      #endif
      T rslt = data[0];
      if constexpr (RED == YAKL_REDUCTION_MIN) {
        #pragma omp parallel for reduction(min:rslt)
        for (int i=1; i<nItems; i++) { rslt = data[i] < rslt ? data[i] : rslt; }
      } else if constexpr (RED == YAKL_REDUCTION_MAX) {
        #pragma omp parallel for reduction(max:rslt)
        for (int i=1; i<nItems; i++) { rslt = data[i] > rslt ? data[i] : rslt; }
      } else if constexpr (RED == YAKL_REDUCTION_SUM) {
        #pragma omp parallel for reduction(+:rslt)
        for (int i=1; i<nItems; i++) { rslt += data[i]; }
      } else if constexpr (RED == YAKL_REDUCTION_PROD) {
        #pragma omp parallel for reduction(*:rslt)
        for (int i=1; i<nItems; i++) { rslt *= data[i]; }
      }
      #ifdef YAKL_AUTO_PROFILE
        timer_stop("YAKL_internal_reduction_apply");
      #endif
      return rslt;
    }
  };

#else

  // Fallback device specialization when no device backend is enabled:
  // identical to the host implementation (serial loops).
  template <class T, int RED> class ParallelReduction<T,memDevice,RED> {
    int nItems; // Number of items in the array that will be reduced
  public:
    ParallelReduction() {}
    ParallelReduction(int const nItems) { this->nItems = nItems; }
    ~ParallelReduction() {}
    void setup(int nItems) { this->nItems = nItems; }
    T operator() (T *data) {
      T rslt = data[0];
      if constexpr (RED == YAKL_REDUCTION_MIN) {
        for (int i=1; i<nItems; i++) { rslt = data[i] < rslt ? data[i] : rslt; }
      } else if constexpr (RED == YAKL_REDUCTION_MAX) {
        for (int i=1; i<nItems; i++) { rslt = data[i] > rslt ? data[i] : rslt; }
      } else if constexpr (RED == YAKL_REDUCTION_SUM) {
        for (int i=1; i<nItems; i++) { rslt += data[i]; }
      } else if constexpr (RED == YAKL_REDUCTION_PROD) {
        for (int i=1; i<nItems; i++) { rslt *= data[i]; }
      }
      return rslt;
    }
  };

#endif

  // Convenience aliases that select the reduction kind.
  template <class T, int memSpace> using ParallelMin  = ParallelReduction<T,memSpace,YAKL_REDUCTION_MIN >;
  template <class T, int memSpace> using ParallelMax  = ParallelReduction<T,memSpace,YAKL_REDUCTION_MAX >;
  template <class T, int memSpace> using ParallelSum  = ParallelReduction<T,memSpace,YAKL_REDUCTION_SUM >;
  template <class T, int memSpace> using ParallelProd = ParallelReduction<T,memSpace,YAKL_REDUCTION_PROD>;

}
quicksort.h
// -*- C++ -*- // Copyright (C) 2007-2015 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/quicksort.h * @brief Implementation of a unbalanced parallel quicksort (in-place). * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUICKSORT_H #define _GLIBCXX_PARALLEL_QUICKSORT_H 1 #include <parallel/parallel.h> #include <parallel/partition.h> namespace __gnu_parallel { /** @brief Unbalanced quicksort divide step. * @param __begin Begin iterator of subsequence. * @param __end End iterator of subsequence. * @param __comp Comparator. * @param __pivot_rank Desired __rank of the pivot. * @param __num_samples Choose pivot from that many samples. * @param __num_threads Number of threads that are allowed to work on * this part. 
*/
  template<typename _RAIter, typename _Compare>
    typename std::iterator_traits<_RAIter>::difference_type
    __parallel_sort_qs_divide(_RAIter __begin, _RAIter __end,
                              _Compare __comp,
                              typename std::iterator_traits
                              <_RAIter>::difference_type __pivot_rank,
                              typename std::iterator_traits
                              <_RAIter>::difference_type __num_samples,
                              _ThreadIndex __num_threads)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _DifferenceType __n = __end - __begin;
      __num_samples = std::min(__num_samples, __n);

      // Allocate uninitialized, to avoid default constructor.
      _ValueType* __samples = static_cast<_ValueType*>
        (::operator new(__num_samples * sizeof(_ValueType)));

      // Draw equidistant samples and copy-construct them into the raw
      // storage with placement new.
      for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
        {
          const unsigned long long __index = static_cast<unsigned long long>
            (__s) * __n / __num_samples;
          ::new(&(__samples[__s])) _ValueType(__begin[__index]);
        }

      __gnu_sequential::sort(__samples, __samples + __num_samples, __comp);

      // Pick the sample whose rank among the samples is proportional to
      // the desired pivot rank in the full sequence.
      _ValueType& __pivot = __samples[__pivot_rank * __num_samples / __n];

      // Partition in parallel around the chosen pivot.
      __gnu_parallel::__binder2nd<_Compare, _ValueType, _ValueType, bool>
        __pred(__comp, __pivot);
      _DifferenceType __split = __parallel_partition(__begin, __end,
                                                     __pred, __num_threads);

      // Destroy the placement-new'ed samples before freeing raw storage.
      for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
        __samples[__s].~_ValueType();
      ::operator delete(__samples);

      return __split;
    }

  /** @brief Unbalanced quicksort conquer step.
   *  @param __begin Begin iterator of subsequence.
   *  @param __end End iterator of subsequence.
   *  @param __comp Comparator.
   *  @param __num_threads Number of threads that are allowed to work on
   *  this part.
   */
  template<typename _RAIter, typename _Compare>
    void
    __parallel_sort_qs_conquer(_RAIter __begin, _RAIter __end,
                               _Compare __comp,
                               _ThreadIndex __num_threads)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      // With a single thread, fall back to the sequential sort.
      if (__num_threads <= 1)
        {
          __gnu_sequential::sort(__begin, __end, __comp);
          return;
        }

      _DifferenceType __n = __end - __begin, __pivot_rank;

      if (__n <= 1)
        return;

      // When the thread count is odd, give the extra thread to the left side.
      _ThreadIndex __num_threads_left;

      if ((__num_threads % 2) == 1)
        __num_threads_left = __num_threads / 2 + 1;
      else
        __num_threads_left = __num_threads / 2;

      // Aim the split so that the amount of work per side is proportional
      // to the number of threads assigned to it.
      __pivot_rank = __n * __num_threads_left / __num_threads;

      _DifferenceType __split =
        __parallel_sort_qs_divide(__begin, __end, __comp, __pivot_rank,
                                  _Settings::get().sort_qs_num_samples_preset,
                                  __num_threads);

      // Recurse on both halves concurrently, splitting the thread budget.
#pragma omp parallel sections num_threads(2)
      {
#pragma omp section
        __parallel_sort_qs_conquer(__begin, __begin + __split,
                                   __comp, __num_threads_left);
#pragma omp section
        __parallel_sort_qs_conquer(__begin + __split, __end,
                                   __comp, __num_threads - __num_threads_left);
      }
    }

  /** @brief Unbalanced quicksort main call.
   *  @param __begin Begin iterator of input sequence.
   *  @param __end End iterator input sequence, ignored.
   *  @param __comp Comparator.
   *  @param __num_threads Number of threads that are allowed to work on
   *  this part.
   */
  template<typename _RAIter, typename _Compare>
    void
    __parallel_sort_qs(_RAIter __begin, _RAIter __end, _Compare __comp,
                       _ThreadIndex __num_threads)
    {
      // NOTE(review): _GLIBCXX_CALL(__n) names __n before its declaration
      // below; presumably the macro expands to nothing unless profiling
      // instrumentation is enabled — confirm against parallel/base.h.
      _GLIBCXX_CALL(__n)

      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _DifferenceType __n = __end - __begin;

      // At least one element per processor.
      if (__num_threads > __n)
        __num_threads = static_cast<_ThreadIndex>(__n);

      __parallel_sort_qs_conquer(
        __begin, __begin + __n, __comp, __num_threads);
    }

} //namespace __gnu_parallel

#endif /* _GLIBCXX_PARALLEL_QUICKSORT_H */
triMeshAcceleratorBVHMatt.h
#pragma once // // Perf changes to decrease size of TriangleBVHNode and improve cache coherency. Will test more thoroughly later. // #ifndef _TRIMESH_ACCELERATOR_BVH_H_ #define _TRIMESH_ACCELERATOR_BVH_H_ namespace ml { template <class FloatType> struct TriangleBVHNode { TriangleBVHNode() : rChild(0), lChild(0), leafTri(0) {} ~TriangleBVHNode() { SAFE_DELETE(rChild); SAFE_DELETE(lChild); } //wait for vs 2013 //template<class T> //using Triangle = TriMesh::Triangle<T>; TriangleBVHNode<FloatType> *lChild; TriangleBVHNode<FloatType> *rChild; union { struct { BoundingBox3<FloatType> boundingBox; }; struct { vec3f vertices[3]; typename TriMesh<FloatType>::Triangle<FloatType> *leafTri; }; }; void computeBoundingBox() { boundingBox.reset(); if (!lChild && !rChild) { leafTri->includeInBoundingBox(boundingBox); } else { if (lChild) { lChild->computeBoundingBox(); boundingBox.include(lChild->boundingBox); } if (rChild) { rChild->computeBoundingBox(); boundingBox.include(rChild->boundingBox); } } } void loadTri(typename TriMesh<FloatType>::Triangle<FloatType> *tri) { vertices[0] = tri->getV0().position; vertices[1] = tri->getV1().position; vertices[2] = tri->getV2().position; leafTri = tri; } void split(typename std::vector<typename TriMesh<FloatType>::Triangle<FloatType>*>::iterator& begin, typename std::vector<typename TriMesh<FloatType>::Triangle<FloatType>*>::iterator& end, unsigned int lastSortAxis) { if (end - begin > 1) { if (lastSortAxis == 0) std::stable_sort(begin, end, cmpX); else if (lastSortAxis == 1) std::stable_sort(begin, end, cmpY); else std::stable_sort(begin, end, cmpZ); lChild = new TriangleBVHNode; rChild = new TriangleBVHNode; const unsigned int newSortAxis = (lastSortAxis+1)%3; lChild->split(begin, begin + ((end-begin)/2), newSortAxis); rChild->split(begin + ((end-begin)/2), end, newSortAxis); } else { assert(end - begin == 1); loadTri(*begin); //found a leaf } } inline bool isLeaf() const { // // TODO: check with Matthias. 
It should be fine to just check the left child. // //return !(lChild || rChild); return !(lChild); } typename const TriMesh<FloatType>::Triangle<FloatType>* intersect(const Ray<FloatType> &r, FloatType& t, FloatType& u, FloatType& v, FloatType& tmin, FloatType& tmax, bool onlyFrontFaces = false) const { if (t < tmin || t > tmax) return nullptr; //early out (warning t must be initialized) if (boundingBox.intersect(r, tmin, tmax)) { if (isLeaf()) { if (intersection::intersectRayTriangle(vertices[0], vertices[1], vertices[2], r, t, u, v, tmin, tmax, onlyFrontFaces)) { tmax = t; return leafTri; } } else { typename const TriMesh<FloatType>::Triangle<FloatType>* t0 = lChild->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces); typename const TriMesh<FloatType>::Triangle<FloatType>* t1 = rChild->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces); if (t1) return t1; if (t0) return t0; } } return nullptr; } // collisions with other Triangles bool intersects(const typename TriMesh<FloatType>::Triangle<FloatType>* tri) const { if (boundingBox.intersects(tri->getV0().position, tri->getV1().position, tri->getV2().position)) { if (isLeaf()) { return tri->intersects(*leafTri); } else { return lChild->intersects(tri) || rChild->intersects(tri); } } else { return false; } } bool intersects(const typename TriMesh<FloatType>::Triangle<FloatType>* tri, const Matrix4x4<FloatType>& transform) const { typename TriMesh<FloatType>::Vertex<FloatType> v0(transform * tri->getV0().position); typename TriMesh<FloatType>::Vertex<FloatType> v1(transform * tri->getV1().position); typename TriMesh<FloatType>::Vertex<FloatType> v2(transform * tri->getV2().position); typename TriMesh<FloatType>::Triangle<FloatType> triTrans(&v0,&v1,&v2); if (boundingBox.intersects(triTrans.getV0().position, triTrans.getV1().position, triTrans.getV2().position)) { if (isLeaf()) { return triTrans.intersects(*leafTri); } else { return lChild->intersects(&triTrans) || rChild->intersects(&triTrans); } } else { return false; 
} } // collisions with other TriangleBVHNodes bool intersects(const TriangleBVHNode& other) const { if (boundingBox.intersects(other.boundingBox)) { if (isLeaf()) { return other.intersects(leafTri); } else { return lChild->intersects(other) || rChild->intersects(other); } } else { return false; } } bool intersects(const TriangleBVHNode& other, const Matrix4x4<FloatType>& transform) const { if (boundingBox.intersects(other.boundingBox * transform)) { //TODO fix OBB if (isLeaf()) { return other.intersects(leafTri, transform.getInverse()); } else { return lChild->intersects(other, transform) || rChild->intersects(other, transform); } } else { return false; } } bool collisionBBoxOnly(const TriangleBVHNode& other, const Matrix4x4<FloatType>& transform) const { if (boundingBox.intersects(other.boundingBox * transform)) { //TODO fix OBB if (isLeaf()) { return true; } else { return lChild->collisionBBoxOnly(other, transform) || rChild->collisionBBoxOnly(other, transform); } } else { return false; } } unsigned int getTreeDepthRec() const { unsigned int maxDepth = 0; if (lChild) maxDepth = std::max(maxDepth, lChild->getTreeDepthRec()); if (rChild) maxDepth = std::max(maxDepth, rChild->getTreeDepthRec()); return maxDepth+1; } unsigned int getNumNodesRec() const { unsigned int numNodes = 1; if (lChild) numNodes += lChild->getNumNodesRec(); if (rChild) numNodes += rChild->getNumNodesRec(); return numNodes; } unsigned int getNumLeaves() const { unsigned int numLeaves = 0; if (lChild) numLeaves += lChild->getNumLeaves(); if (rChild) numLeaves += rChild->getNumLeaves(); if (!lChild && !rChild) { assert(leafTri); numLeaves++; } return numLeaves; } static bool cmpX(typename TriMesh<FloatType>::Triangle<FloatType> *t0, typename TriMesh<FloatType>::Triangle<FloatType> *t1) { return t0->getCenter().x < t1->getCenter().x; } static bool cmpY(typename TriMesh<FloatType>::Triangle<FloatType> *t0, typename TriMesh<FloatType>::Triangle<FloatType> *t1) { return t0->getCenter().y < 
t1->getCenter().y; } static bool cmpZ(typename TriMesh<FloatType>::Triangle<FloatType> *t0, typename TriMesh<FloatType>::Triangle<FloatType> *t1) { return t0->getCenter().z < t1->getCenter().z; } }; template <class FloatType> class TriMeshAcceleratorBVH : public TriMeshRayAccelerator<FloatType>, public TriMeshCollisionAccelerator<FloatType, TriMeshAcceleratorBVH<FloatType>> { public: TriMeshAcceleratorBVH() { m_Root = nullptr; } TriMeshAcceleratorBVH(const TriMesh<FloatType>& triMesh, bool storeLocalCopy = false) { m_Root = nullptr; build(triMesh, storeLocalCopy); //std::vector<const TriMesh<FloatType>*> meshes; //meshes.push_back(&triMesh); //build(meshes, true); //std::vector<std::pair<const TriMesh<FloatType>*, Matrix4x4<FloatType>>> meshes; //meshes.push_back(std::make_pair(&triMesh, Matrix4x4<FloatType>::identity())); //build(meshes); } ~TriMeshAcceleratorBVH() { SAFE_DELETE(m_Root); } void printInfo() const { std::cout << "Info: TriangleBVHAccelerator build done ( " << m_TrianglePointers.size() << " tris )" << std::endl; std::cout << "Info: Tree depth " << m_Root->getTreeDepthRec() << std::endl; std::cout << "Info: NumNodes " << m_Root->getNumNodesRec() << std::endl; std::cout << "Info: NumLeaves " << m_Root->getNumLeaves() << std::endl; } private: //! defined by the interface bool collisionInternal(const TriMeshAcceleratorBVH<FloatType>& other) const { return m_Root->intersects(*other.m_Root); } bool collisionTransformInternal(const TriMeshAcceleratorBVH<FloatType>& other, const Matrix4x4<FloatType>& transform) const { return m_Root->intersects(*other.m_Root, transform); } bool collisionTransformBBoxOnlyInternal(const TriMeshAcceleratorBVH<FloatType>& other, const Matrix4x4<FloatType>& transform) const { return m_Root->collisionBBoxOnly(*other.m_Root, transform); } //! 
defined by the interface typename const TriMesh<FloatType>::Triangle<FloatType>* intersectInternal(const Ray<FloatType>& r, FloatType& t, FloatType& u, FloatType& v, FloatType tmin = (FloatType)0, FloatType tmax = std::numeric_limits<FloatType>::max(), bool onlyFrontFaces = false) const { u = v = std::numeric_limits<FloatType>::max(); t = tmax; //TODO MATTHIAS: probably we don't have to track tmax since t must always be smaller than the prev return m_Root->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces); } //! defined by the interface void buildInternal() { SAFE_DELETE(m_Root); bool useParallelBuild = false; if (useParallelBuild) { buildParallel(m_TrianglePointers); } else { buildRecursive(m_TrianglePointers); } } void buildParallel(std::vector<typename TriMesh<FloatType>::Triangle<FloatType>*>& tris) { struct NodeEntry { size_t begin; size_t end; TriangleBVHNode<FloatType> *node; }; std::vector<NodeEntry> currLevel(1); m_Root = new TriangleBVHNode<FloatType>; currLevel[0].node = m_Root; currLevel[0].begin = 0; currLevel[0].end = tris.size(); unsigned int lastSortAxis = 0; bool needFurtherSplitting = true; while(needFurtherSplitting) { needFurtherSplitting = false; std::vector<NodeEntry> nextLevel(currLevel.size()*2); #pragma omp parallel for for (int i = 0; i < (int)std::min(currLevel.size(),tris.size()); i++) { const size_t begin = currLevel[i].begin; const size_t end = currLevel[i].end; if (end - begin > 1) { if (lastSortAxis == 0) std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpX); else if (lastSortAxis == 1) std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpY); else std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpZ); TriangleBVHNode<FloatType>* node = currLevel[i].node; TriangleBVHNode<FloatType>* lChild = new TriangleBVHNode<FloatType>; TriangleBVHNode<FloatType>* rChild = new TriangleBVHNode<FloatType>; node->lChild = lChild; node->rChild = 
rChild; nextLevel[2*i+0].begin = begin; nextLevel[2*i+0].end = begin + ((end-begin)/2); nextLevel[2*i+1].begin = begin + ((end-begin)/2); nextLevel[2*i+1].end = end; nextLevel[2*i+0].node = currLevel[i].node->lChild; nextLevel[2*i+1].node = currLevel[i].node->rChild; if (nextLevel[2*i+0].end - nextLevel[2*i+0].begin < 2) lChild->loadTri(tris[nextLevel[2*i+0].begin]); else needFurtherSplitting = true; if (nextLevel[2*i+1].end - nextLevel[2*i+1].begin < 2) rChild->loadTri(tris[nextLevel[2*i+1].begin]); else needFurtherSplitting = true; } } if (needFurtherSplitting) { currLevel = nextLevel; lastSortAxis = (lastSortAxis+1)%3; } } m_Root->computeBoundingBox(); } void buildRecursive(std::vector<typename TriMesh<FloatType>::Triangle<FloatType>*>& tris) { assert(tris.size() > 2); m_Root = new TriangleBVHNode<FloatType>; m_Root->split(tris.begin(), tris.end(), 0); m_Root->computeBoundingBox(); } //! private data TriangleBVHNode<FloatType>* m_Root; }; typedef TriMeshAcceleratorBVH<float> TriMeshAcceleratorBVHf; typedef TriMeshAcceleratorBVH<double> TriMeshAcceleratorBVHd; } // namespace ml #endif