source
stringlengths
3
92
c
stringlengths
26
2.25M
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 
} } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 
24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } 
} }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } 
}, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } 
}, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 
13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 
8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 
63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 
47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *, DDSVector4 *, unsigned char *, size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *), WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,const MagickBooleanType, ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), 
WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *), WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *), WriteUncompressed(Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = 
left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z); }

/*
  Expand a DXT color pair (two RGB565 endpoints) into the full 4-entry
  palette used by a BC1/DXT1 block.  When ignoreAlpha is set, or c0 > c1,
  the two intermediate colors are 1/3 and 2/3 interpolants; otherwise the
  block is in "3 color + transparent" mode: entry 2 is the midpoint and
  entry 3 is black with a[3]=255 (the transparency marker consumed by
  SetDXT1Pixels).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  /* Unpack the two RGB565 endpoints into 8-bit channels. */
  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* Opaque 4-color mode: two interpolated colors at 1/3 and 2/3. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      /* 3-color + transparent mode: midpoint color, then transparent black. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  Quantize 16 alpha samples against an 8-entry alpha code book built from
  min/max (DXT5-style).  For each sample the nearest code index is written
  to indices[]; the summed squared distance is returned as the fit error.
  A sample of -1 marks an unused texel and is assigned index 0.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  register ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  /* Endpoints first; codes 6/7 are the fixed 0/255 entries. */
  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;

  /* Linear interpolants between min and max fill the remaining slots. */
  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      /*
        Unsigned subtraction may wrap when value < codes[j], but squaring
        makes this harmless: (-x)^2 == x^2 modulo 2^N, so dist is the true
        squared distance.
      */
      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }

  return error;
}

/*
  Cluster-fit color compression (body continues on following lines):
  exhaustively partitions the ordered points into four clusters to find
  the endpoint pair minimizing the metric-weighted error.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); 
VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for 
(i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4* points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; register ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const 
DDSSingleColourLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z))); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); 
centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by 
the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { const char *option; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; Image *image; MagickBooleanType status, cubemap, volume, read_mipmaps; PixelTrait alpha_trait; size_t n, num_images; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse; image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. */ if (ReadDDSInfo(image, &dds_info) != MagickTrue) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { alpha_trait = UndefinedPixelTrait; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { alpha_trait = BlendPixelTrait; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { alpha_trait = BlendPixelTrait; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ 
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; if ((num_images == 0) || (num_images > GetBlobSize(image))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse) ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit"); option=GetImageOption(image_info,"dds:skip-mipmaps"); if (IsStringFalse(option) != MagickFalse) read_mipmaps=MagickTrue; for (n = 0; n < num_images; n++) { if (n != 0) { /* Start a new image */ if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->alpha_trait=alpha_trait; image->compression=compression; image->columns=dds_info.width; image->rows=dds_info.height; image->storage_class=DirectClass; image->endian=LSBEndian; image->depth=8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); (void) SetImageBackgroundColor(image,exception); 
/* Decode the pixel data for this frame with the format-specific decoder. */
status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
      if (status == MagickFalse)
        {
          (void) CloseBlob(image);
          return(GetFirstImageInList(image));
        }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
  Parse the DDS_HEADER that follows the 4-byte "DDS " magic.  All fields
  are little-endian DWORDs.  Returns MagickFalse when the header size
  (must be 124), the pixel-format size (must be 32), or the required
  flags (width/height/pixelformat) do not match.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);

  /* Check header field */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);

  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */

  /* Read pixel format structure */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */

  return MagickTrue;
}

/*
  Write one decoded DXT1 4x4 block into the pixel cache at (x,y).  Each
  texel's 2-bit code selects a palette entry from colors.  Returns
  MagickFalse when a nonzero colors.a[code] is hit on an image whose
  alpha_trait is UndefinedPixelTrait — the caller uses this to enable
  alpha and re-run the block.  (Signature continues past this line.)
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  register ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* Guard partial blocks at the right/bottom image edges. */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t)
image->rows) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q); if ((colors.a[code] != 0) && (image->alpha_trait == UndefinedPixelTrait)) return(MagickFalse); q+=GetPixelChannels(image); } } } return(MagickTrue); } static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception) { MagickBooleanType status; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } status=MagickTrue; if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { AcquireNextImage(image_info,image,exception); if (image->next == (Image *) NULL) return(MagickFalse); image->next->alpha_trait=image->alpha_trait; image=SyncNextImageInList(image); status=SetImageExtent(image,w,h,exception); if (status == MagickFalse) break; status=decoder(image,dds_info,exception); if (status == MagickFalse) break; if ((w == 1) && (h == 1)) break; w=DIV2(w); h=DIV2(h); } } return(status); } static MagickBooleanType ReadDXT1Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t x; size_t bits; ssize_t y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ 
q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read 8 bytes of data from the image */ c0=ReadBlobLSBShort(image); c1=ReadBlobLSBShort(image); bits=ReadBlobLSBLong(image); CalculateColors(c0,c1,&colors,MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse) { /* Correct alpha */ SetImageAlpha(image,QuantumRange,exception); q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q != (Quantum *) NULL) SetDXT1Pixels(image,x,y,colors,bits,q); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ 
c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; MagickSizeType alpha_bits; register Quantum *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, 
image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType 
ReadUncompressedRGBPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t x, y; unsigned short color; for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(image,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); if (dds_info->pixelformat.rgb_bitcount == 32) (void) ReadBlobByte(image); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType,exception); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) 
return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBAPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleAlphaType,exception); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(image,(color & (1 << 15)) ? 
QuantumRange : 0,q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else if (alphaBits == 2) { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (color >> 8)),q); SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q); } else { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q); } } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R 
e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { /* 
Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)w*h*pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported 
formats. % % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) 
NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if ((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } option=GetImageOption(image_info,"dds:raw"); if (IsStringTrue(option) == MagickFalse) WriteDDSInfo(image,pixelFormat,compression,mipmaps); else mipmaps=0; WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS 
requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register ssize_t x; ssize_t i, y, bx, by; register const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for 
(x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? 
(float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); 
(void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; register ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if (fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; register ssize_t i; unsigned char 
color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const Quantum *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } }
openmp_closest_bygraph.c
/* Bag of Tasks OpenMP implementation to find the closest pairs of waypoints in each of a set of METAL TMG graph files. The tasks to complete are to find the closest pair of points in METAL TMG files given as command-line parameters in argv[2] through argv[argc-1]. The tasks are distributed in an order based on the string passed as argv[1], which is one of: "orig": the order that the files are presented on the command line "alpha": alphabetical order by filename "size": from largest to smallest number of points in the file "random": randomized order Jim Teresco, Fall 2021 Siena College */ #include <float.h> #include <limits.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include "timer.h" #include "tmggraph.h" // struct to encapsulate info about the tasks in the bag typedef struct cptask { int num_vertices; char *filename; } cptask; // helper function to read only up to the number of vertices from a // TMG file and return that number int read_tmg_vertex_count(char *filename) { FILE *fp = fopen(filename, "r"); if (!fp) { fprintf(stderr, "Cannot open file %s for reading.\n", filename); exit(1); } // read over first line char temp[100]; fscanf(fp, "%s %s %s", temp, temp, temp); // read number of vertices int nv; fscanf(fp, "%d", &nv); // that's all we need for now fclose(fp); return nv; } int main(int argc, char *argv[]) { int num_threads; // about how many distance calculations? 
long dcalcs = 0; int worker_rank; int num_tasks; int i; struct timeval start_time, stop_time; double active_time; // all parameters except argv[0] (program name) and argv[1] (input // ordering) will be filenames to load, so the number of tasks is // argc - 2 num_tasks = argc - 2; if (argc < 3) { fprintf(stderr, "Usage: %s orig|alpha|size|random filenames\n", argv[0]); exit(1); } // check for a valid ordering in argv[1]; char *orderings[] = { "orig", "alpha", "size", "random" }; int ordering = -1; for (i = 0; i < 4; i++) { if (strcmp(argv[1], orderings[i]) == 0) { ordering = i; break; } } if (ordering == -1) { fprintf(stderr, "Usage: %s orig|alpha|size|random filenames\n", argv[0]); exit(1); } printf("Have %d tasks to be done\n", num_tasks); // start the timer gettimeofday(&start_time, NULL); // allocate and populate our "bag of tasks" array cptask **tasks = (cptask **)malloc(num_tasks*sizeof(cptask *)); // add the first at pos 0, since we know there's at least one and // this will eliminate some special cases in our code below. 
tasks[0] = (cptask *)malloc(sizeof(cptask)); tasks[0]->filename = argv[2]; if (ordering == 2) { tasks[0]->num_vertices = read_tmg_vertex_count(argv[3]); } // get them all in for (i = 1; i < num_tasks; i++) { cptask *taski = (cptask *)malloc(sizeof(cptask)); taski->filename = argv[i+2]; int pos = i; int insertat; switch (ordering) { case 0: // original ordering as specified by argv tasks[i] = taski; break; case 1: // alphabetical order by filename while (pos > 0 && strcmp(taski->filename, tasks[pos-1]->filename) < 0) { tasks[pos] = tasks[pos-1]; pos--; } tasks[pos] = taski; break; case 2: // order by size largest to smallest number of vertices taski->num_vertices = read_tmg_vertex_count(taski->filename); while (pos > 0 && taski->num_vertices >= tasks[pos-1]->num_vertices) { tasks[pos] = tasks[pos-1]; pos--; } tasks[pos] = taski; break; case 3: // order randomly insertat = random()%(pos+1); while (pos > insertat) { tasks[pos] = tasks[pos-1]; pos--; } tasks[pos] = taski; break; } } // for thread stats int minjobs = num_tasks+1; int maxjobs = 0; long mincalcs = LONG_MAX; long maxcalcs = 0L; long totalcalcs = 0; double mintime = DBL_MAX; double maxtime = 0.0; // what's the next task available in the bag of tasks (index into array) int next_task = 0; #pragma omp parallel shared(tasks, next_task, minjobs, maxjobs, mincalcs, maxcalcs, totalcalcs, mintime, maxtime, num_threads) { struct timeval start_time, stop_time; // start the timer gettimeofday(&start_time, NULL); int my_task = -1; int jobs_done = 0; long dcalcs = 0L; int thread_num = omp_get_thread_num(); num_threads = omp_get_num_threads(); while (1) { // grab a task from the bag #pragma omp critical(mutex) my_task = next_task++; if (my_task >= num_tasks) break; // this thread can process this one printf("[%d] working on %s\n", thread_num, tasks[my_task]->filename); tmg_graph *g = tmg_load_graph(tasks[my_task]->filename); if (g == NULL) { fprintf(stderr, "Could not create graph from file %s\n", 
tasks[my_task]->filename); exit(1); } int v1, v2; double distance; // do it tmg_closest_pair(g, &v1, &v2, &distance); jobs_done++; long job_calcs = g->num_vertices; job_calcs *= g->num_vertices; job_calcs /= 2; dcalcs += job_calcs; printf("[%d] %s closest pair #%d %s (%.6f,%.6f) and #%d %s (%.6f,%.6f) distance %.15f\n", thread_num, tasks[my_task]->filename, v1, g->vertices[v1]->w.label, g->vertices[v1]->w.coords.lat, g->vertices[v1]->w.coords.lng, v2, g->vertices[v2]->w.label, g->vertices[v2]->w.coords.lat, g->vertices[v2]->w.coords.lng, distance); tmg_graph_destroy(g); } gettimeofday(&stop_time, NULL); double thread_elapsed_time = diffgettime(start_time, stop_time); // separate critical section for accumulation and update of // simulation stats #pragma omp critical(stats) { if (jobs_done < minjobs) minjobs = jobs_done; if (jobs_done > maxjobs) maxjobs = jobs_done; if (dcalcs < mincalcs) mincalcs = dcalcs; if (dcalcs > maxcalcs) maxcalcs = dcalcs; totalcalcs += dcalcs; if (thread_elapsed_time < mintime) mintime = thread_elapsed_time; if (thread_elapsed_time > maxtime) maxtime = thread_elapsed_time; } printf("[%d] terminating\n", thread_num); } // get main thread's elapsed time gettimeofday(&stop_time, NULL); active_time = diffgettime(start_time, stop_time); double avgjobs = 1.0*num_tasks/num_threads; printf("Main thread was active for %.4f seconds\n", active_time); printf("%d workers processed %d jobs with about %ld distance calculations\n", num_threads, num_tasks, totalcalcs); printf("Job balance: min %d, max %d, avg: %.2f\n", minjobs, maxjobs, avgjobs); printf("Distance calculation balance: min %ld, max %ld, avg: %.2f\n", mincalcs, maxcalcs, ((1.0*totalcalcs)/num_threads)); printf("Active time balance: min %.4f, max %.4f\n", mintime, maxtime); for (i = 0; i < num_tasks; i++) { free(tasks[i]); } free(tasks); return 0; }
tinyexr.h
/* Copyright (c) 2014 - 2015, Syoyo Fujita All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the <organization> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __TINYEXR_H__ #define __TINYEXR_H__ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #ifdef __cplusplus extern "C" { #endif // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) //#define TINYEXR_COMPRESSIONTYPE_RLE (1) // not supported yet #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) typedef struct _EXRAttribute { char *name; char *type; int size; unsigned char *value; // uint8_t* } EXRAttribute; typedef struct _EXRImage { // Custom attributes(exludes required attributes(e.g. `channels`, // `compression`, etc) EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES]; int num_custom_attributes; int num_channels; const char **channel_names; unsigned char **images; // image[channels][pixels] int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for // each channel int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Meomory|File), then users // can edit it(only valid for HALF pixel type // channel) int width; int height; float pixel_aspect_ratio; int compression; // compression type(TINYEXR_COMPRESSIONTYPE_*) int line_order; int data_window[4]; int display_window[4]; float screen_window_center[2]; float screen_window_width; } EXRImage; typedef struct _DeepImage { int num_channels; const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int width; int height; } DeepImage; // @deprecated { to be removed. } // Loads single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. 
// Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x hight // Return 0 if success // Returns error string in `err` when there's an error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // Parse single-frame OpenEXR header from a file and initialize `EXRImage` // struct. // Users then call LoadMultiChannelEXRFromFile to actually load image data into // `EXRImage` extern int ParseMultiChannelEXRHeaderFromFile(EXRImage *image, const char *filename, const char **err); // Parse single-frame OpenEXR header from a memory and initialize `EXRImage` // struct. // Users then call LoadMultiChannelEXRFromMemory to actually load image data // into `EXRImage` extern int ParseMultiChannelEXRHeaderFromMemory(EXRImage *image, const unsigned char *memory, const char **err); // Loads multi-channel, single-frame OpenEXR image from a file. // Application must setup `ParseMultiChannelEXRHeaderFromFile` before calling // `LoadMultiChannelEXRFromFile`. // Application can free EXRImage using `FreeExrImage` // Return 0 if success // Returns error string in `err` when there's an error extern int LoadMultiChannelEXRFromFile(EXRImage *image, const char *filename, const char **err); // Loads multi-channel, single-frame OpenEXR image from a memory. // Application must setup `EXRImage` with `ParseMultiChannelEXRHeaderFromMemory` // before calling `LoadMultiChannelEXRFromMemory`. // Application can free EXRImage using `FreeExrImage` // Return 0 if success // Returns error string in `err` when there's an error extern int LoadMultiChannelEXRFromMemory(EXRImage *image, const unsigned char *memory, const char **err); // Saves floating point RGBA image as OpenEXR. // Image is compressed using EXRImage.compression value. 
// Return 0 if success
// Returns error string in `err` when there's an error
// extern int SaveEXR(const float *in_rgba, int width, int height,
//                    const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to a file.
// `compression_type` is one of TINYEXR_COMPRESSIONTYPE_*.
// Returns 0 if success
// Returns error string in `err` when there's an error
extern int SaveMultiChannelEXRToFile(const EXRImage *image,
                                     const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes written on success, or 0 when it failed
// (the return type is unsigned, so it cannot be negative).
// Returns error string in `err` when there's an error
extern size_t SaveMultiChannelEXRToMemory(const EXRImage *image,
                                          unsigned char **memory,
                                          const char **err);

// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns 0 if success
// Returns error string in `err` when there's an error
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Return 0 if success
// Returns error string in `err` when there's an error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
//                                 char *filename,
//                                 const char **err);

// Initializes an EXRImage struct.
extern void InitEXRImage(EXRImage *exrImage);

// Frees the internal data of an EXRImage struct.
// Returns 0 if success.
extern int FreeEXRImage(EXRImage *exrImage);

// For emscripten.
// Parse single-frame OpenEXR header from memory.
// Return 0 if success extern int ParseEXRHeaderFromMemory(EXRAttribute *customAttributes, int *numCustomAttributes, int *width, int *height, const unsigned char *memory); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // `out_rgba` must have enough memory(at least sizeof(float) x 4(RGBA) x width x // hight) // Return 0 if success // Returns error string in `err` when there's an error extern int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory, const char **err); #ifdef __cplusplus } #endif #ifdef TINYEXR_IMPLEMENTATION #include <cstdio> #include <cstdlib> #include <cassert> #include <cstring> #include <algorithm> #include <string> #include <vector> #include "tinyexr.h" #ifdef _OPENMP #include <omp.h> #endif namespace { namespace miniz { /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). 
If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. 
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. 
Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. 
No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). 
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. 
Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED #include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. 
//#define MINIZ_NO_TIME

// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
//#define MINIZ_NO_ARCHIVE_APIS

// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS

// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS

// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES

// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC

#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
#include <time.h>
#endif

#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) ||   \
    defined(__i386) || defined(__i486__) || defined(__i486) ||    \
    defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif

#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
  0 // disable to suppress compiler warnings
#endif

#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) ||   \
    defined(_LP64) || defined(__LP64__) || defined(__ia64__) ||     \
    defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API Definitions.

// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;

// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
// User-supplied heap callbacks (zlib-style zalloc/zfree/realloc replacements).
// `opaque` is an arbitrary user pointer that is passed through untouched.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

// miniz version identification (analogue of zlib's ZLIB_VERSION / ZLIB_VERNUM;
// the MZ_* names are aliased to the ZLIB_* names further below unless
// MINIZ_NO_ZLIB_COMPATIBLE_NAMES is defined).
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard (not present in zlib).
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};

// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
  MZ_NO_COMPRESSION = 0,
  MZ_BEST_SPEED = 1,
  MZ_BEST_COMPRESSION = 9,
  MZ_UBER_COMPRESSION = 10,
  MZ_DEFAULT_LEVEL = 6,
  MZ_DEFAULT_COMPRESSION = -1
};

// Window bits. miniz only accepts +/- this magnitude (see the
// mz_deflateInit2()/mz_inflateInit2() notes below).
#define MZ_DEFAULT_WINDOW_BITS 15

// Opaque per-stream (de)compressor state; allocated via the stream's
// zalloc/zfree callbacks.
struct mz_internal_state;

// Compression/decompression stream struct.
typedef struct mz_stream_s {
  // Input side: the caller sets next_in/avail_in before each call and the
  // library advances them as bytes are consumed.
  const unsigned char *next_in; // pointer to next byte to read
  unsigned int avail_in;        // number of bytes available at next_in
  mz_ulong total_in;            // total number of bytes consumed so far

  // Output side: the caller sets next_out/avail_out; the library advances
  // them as bytes are produced.
  unsigned char *next_out; // pointer to next byte to write
  unsigned int avail_out;  // number of bytes that can be written to next_out
  mz_ulong total_out;      // total number of bytes produced so far

  char *msg;                       // error msg (unused)
  struct mz_internal_state *state; // internal state, allocated by zalloc/zfree

  mz_alloc_func
      zalloc;         // optional heap allocation function (defaults to malloc)
  mz_free_func zfree; // optional heap free function (defaults to free)
  void *opaque;       // heap alloc function user pointer

  int data_type;     // data_type (unused)
  mz_ulong adler;    // adler32 of the source or uncompressed data
  mz_ulong reserved; // not used
} mz_stream;

// Convenience pointer typedef (equivalent of zlib's z_streamp).
typedef mz_stream *mz_streamp;

// Returns the version string of miniz.c.
const char *mz_version(void);

// mz_deflateInit() initializes a compressor with default options:
// Parameters:
//  pStream must point to an initialized mz_stream struct.
//  level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
//  level 1 enables a specially optimized compression function that's been
//  optimized purely for performance, not ratio.
//  (This special func. is currently only enabled when
//  MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
//  MZ_PARAM_ERROR if the input parameters are bogus.
//  MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. 
int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). 
// MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION 
MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, 
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size); // Attempts to locates a file in the archive's central directory. // Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH // Returns -1 if the file cannot be found. int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); // Extracts a archive file to a memory buffer using no memory allocation. mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); // Extracts a archive file to a memory buffer. mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags); // Extracts a archive file to a dynamically allocated heap buffer. void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags); void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags); // Extracts a archive file using a callback function to output the file's data. mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); #ifndef MINIZ_NO_STDIO // Extracts a archive file to a disk file and sets its last accessed and // modified times. 
// This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip); // ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. // For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. 
These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. 
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). 
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. 
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. 
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. 
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1, } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; // tdefl's compression state structure. 
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush); // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a // non-NULL tdefl_put_buf_func_ptr. // tdefl_compress_buffer() always consumes the entire input buffer. tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); // Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't // defined, because it uses some of its macros. #ifndef MINIZ_NO_ZLIB_APIS // Create tdefl_compress() flags given zlib-style compression parameters. // level may range from [0,10] (where 10 is absolute max compression, but may be // much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, // MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #endif // #ifndef MINIZ_NO_ZLIB_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want // the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; #include <string.h> #include <assert.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void) x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? 
(a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } static void *def_realloc_func(void *opaque, void *address, size_t items, size_t size) { (void)opaque, (void)address, (void)items, (void)size; return MZ_REALLOC(address, items * size); } const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; 
pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). 
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int 
mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress( &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some // uncompressed data left in the output dictionary - // oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress // without supplying more input or by setting flush // to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data // when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's // at least 1 more byte on the way. 
If there's no more room left in the // output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index: \ ; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. 
// Reads one byte from the input stream into c. If the input buffer is
// exhausted and the caller signaled more input is coming, the coroutine
// suspends with TINFL_STATUS_NEEDS_MORE_INPUT and re-checks on resume;
// otherwise the stream is padded with 0's (see the TODO note above).
#define TINFL_GET_BYTE(state_index, c)                                         \
  do {                                                                         \
    if (pIn_buf_cur >= pIn_buf_end) {                                          \
      for (;;) {                                                               \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {                        \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT);         \
          if (pIn_buf_cur < pIn_buf_end) {                                     \
            c = *pIn_buf_cur++;                                                \
            break;                                                             \
          }                                                                    \
        } else {                                                               \
          c = 0;                                                               \
          break;                                                               \
        }                                                                      \
      }                                                                        \
    } else                                                                     \
      c = *pIn_buf_cur++;                                                      \
  }                                                                            \
  MZ_MACRO_END

// Appends input bytes to bit_buf (8 bits at a time) until at least n bits
// are buffered.
#define TINFL_NEED_BITS(state_index, n)                                        \
  do {                                                                         \
    mz_uint c;                                                                 \
    TINFL_GET_BYTE(state_index, c);                                            \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                             \
    num_bits += 8;                                                             \
  } while (num_bits < (mz_uint)(n))

// Discards the low n bits of bit_buf, refilling first if necessary.
#define TINFL_SKIP_BITS(state_index, n)                                        \
  do {                                                                         \
    if (num_bits < (mz_uint)(n)) {                                             \
      TINFL_NEED_BITS(state_index, n);                                         \
    }                                                                          \
    bit_buf >>= (n);                                                           \
    num_bits -= (n);                                                           \
  }                                                                            \
  MZ_MACRO_END

// Extracts the low n bits of bit_buf into b (refilling first if necessary),
// then consumes them.
#define TINFL_GET_BITS(state_index, b, n)                                      \
  do {                                                                         \
    if (num_bits < (mz_uint)(n)) {                                             \
      TINFL_NEED_BITS(state_index, n);                                         \
    }                                                                          \
    b = bit_buf & ((1 << (n)) - 1);                                            \
    bit_buf >>= (n);                                                           \
    num_bits -= (n);                                                           \
  }                                                                            \
  MZ_MACRO_END

// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
// Slow-path bit buffer refill (see comment above): pull input one byte at a
// time until the next Huffman code in pHuff is fully decodable from bit_buf,
// or 15 bits (deflate's maximum code length) are buffered. Breaks out of the
// enclosing statement as soon as a complete code is resolvable. Expects
// locals temp, code_len and c in the expansion context (TINFL_HUFF_DECODE).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff)                             \
  do {                                                                         \
    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)];         \
    if (temp >= 0) {                                                           \
      code_len = temp >> 9;                                                    \
      if ((code_len) && (num_bits >= code_len))                                \
        break;                                                                 \
    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) {                            \
      code_len = TINFL_FAST_LOOKUP_BITS;                                       \
      do {                                                                     \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];         \
      } while ((temp < 0) && (num_bits >= (code_len + 1)));                    \
      if (temp >= 0)                                                           \
        break;                                                                 \
    }                                                                          \
    TINFL_GET_BYTE(state_index, c);                                            \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                             \
    num_bits += 8;                                                             \
  } while (num_bits < 15);

// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff)                             \
  do {                                                                         \
    int temp;                                                                  \
    mz_uint code_len, c;                                                       \
    if (num_bits < 15) {                                                       \
      if ((pIn_buf_end - pIn_buf_cur) < 2) {                                   \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff);                            \
      } else {                                                                 \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) |           \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8));      \
        pIn_buf_cur += 2;                                                      \
        num_bits += 16;                                                        \
      }                                                                        \
    }                                                                          \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0)                                                                     \
      code_len = temp >> 9, temp &= 511;                                       \
    else {                                                                     \
      code_len = TINFL_FAST_LOOKUP_BITS;                                       \
      do {                                                                     \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];         \
      } while (temp < 0);                                                      \
    }                                                                          \
    sym = temp;                                                                \
    bit_buf >>= code_len;                                                      \
    num_bits -= code_len;                                                      \
  }                                                                            \
  MZ_MACRO_END

// Core streaming inflate routine, written as a resumable coroutine built from
// the TINFL_CR_* macros: any TINFL_CR_RETURN suspends, saving the state index
// in r->m_state, and a later call resumes at that point. Consumes up to
// *pIn_buf_size bytes from pIn_buf_next and writes up to *pOut_buf_size bytes
// at pOut_buf_next; on return both sizes are updated to the amounts actually
// consumed/produced.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags) {
  // Base values and extra-bit counts for length/distance codes (RFC 1951).
  static const int s_length_base[31] = {
      3,  4,  5,  6,  7,  8,  9,  10,  11,  13,  15,  17,  19,  23, 27, 31,
      35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0,  0};
  static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
                                         1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
                                         4, 4, 5, 5, 5, 5, 0, 0, 0};
  static const int s_dist_base[32] = {
      1,    2,    3,    4,    5,    7,     9,     13,    17,  25,   33,
      49,   65,   97,   129,  193,  257,   385,   513,   769, 1025, 1537,
      2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0,   0};
  static const int s_dist_extra[32] = {0, 0, 0,  0,  1,  1,  2,  2,  3,  3,
                                       4, 4, 5,  5,  6,  6,  7,  7,  8,  8,
                                       9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
  // Stream order of the code-length code sizes in dynamic blocks.
  static const mz_uint8 s_length_dezigzag[19] = {
      16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
  // Minimum sizes of the literal/length, distance, and code-length tables.
  static const int s_min_table_sizes[3] = {257, 1, 4};
  tinfl_status status = TINFL_STATUS_FAILED;
  mz_uint32 num_bits, dist, counter, num_extra;
  tinfl_bit_buf_t bit_buf;
  const mz_uint8 *pIn_buf_cur = pIn_buf_next,
                 *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
  mz_uint8 *pOut_buf_cur = pOut_buf_next,
           *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
  size_t out_buf_size_mask =
             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
                 ? (size_t)-1
                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
         dist_from_out_buf_start;

  // Ensure the output buffer's size is a power of 2, unless the output buffer
  // is large enough to hold the entire output file (in which case it doesn't
  // matter).
  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
      (pOut_buf_next < pOut_buf_start)) {
    *pIn_buf_size = *pOut_buf_size = 0;
    return TINFL_STATUS_BAD_PARAM;
  }

  // Restore state saved by a previous (suspended) call.
  num_bits = r->m_num_bits;
  bit_buf = r->m_bit_buf;
  dist = r->m_dist;
  counter = r->m_counter;
  num_extra = r->m_num_extra;
  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
  TINFL_CR_BEGIN

  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 =
      0;
  r->m_z_adler32 = r->m_check_adler32 = 1;
  // Optionally parse and validate the 2-byte zlib header (RFC 1950).
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    TINFL_GET_BYTE(1, r->m_zhdr0);
    TINFL_GET_BYTE(2, r->m_zhdr1);
    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
                  ((out_buf_size_mask + 1) <
                   (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
    if (counter) {
      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
    }
  }

  // Process one deflate block per iteration until the final-block bit is set.
  do {
    TINFL_GET_BITS(3, r->m_final, 3);
    r->m_type = r->m_final >> 1;
    if (r->m_type == 0) {
      // Stored (uncompressed) block: align to byte boundary, read LEN/NLEN.
      TINFL_SKIP_BITS(5, num_bits & 7);
      for (counter = 0; counter < 4; ++counter) {
        if (num_bits)
          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
        else
          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
      }
      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
          (mz_uint)(0xFFFF ^
                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
      }
      // Drain any whole bytes still sitting in the bit buffer first.
      while ((counter) && (num_bits)) {
        TINFL_GET_BITS(51, dist, 8);
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        *pOut_buf_cur++ = (mz_uint8)dist;
        counter--;
      }
      // Then bulk-copy the remaining stored bytes straight from the input.
      while (counter) {
        size_t n;
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        while (pIn_buf_cur >= pIn_buf_end) {
          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
          } else {
            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
          }
        }
        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
                          (size_t)(pIn_buf_end - pIn_buf_cur)),
                   counter);
        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
        pIn_buf_cur += n;
        pOut_buf_cur += n;
        counter -= (mz_uint)n;
      }
    } else if (r->m_type == 3) {
      // Block type 3 is reserved/invalid.
      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
    } else {
      if (r->m_type == 1) {
        // Static Huffman block: fixed code sizes (RFC 1951, section 3.2.6).
        mz_uint8 *p = r->m_tables[0].m_code_size;
        mz_uint i;
        r->m_table_sizes[0] = 288;
        r->m_table_sizes[1] = 32;
        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
        for (i = 0; i <= 143; ++i)
          *p++ = 8;
        for (; i <= 255; ++i)
          *p++ = 9;
        for (; i <= 279; ++i)
          *p++ = 7;
        for (; i <= 287; ++i)
          *p++ = 8;
      } else {
        // Dynamic Huffman block: read table sizes and code-length code sizes.
        for (counter = 0; counter < 3; counter++) {
          TINFL_GET_BITS(11, r->m_table_sizes[counter],
                         "\05\05\04"[counter]);
          r->m_table_sizes[counter] += s_min_table_sizes[counter];
        }
        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
          mz_uint s;
          TINFL_GET_BITS(14, s, 3);
          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] =
              (mz_uint8)s;
        }
        r->m_table_sizes[2] = 19;
      }
      // Build the decode tables (fast lookup array + binary tree) from the
      // code sizes. For dynamic blocks the code-length table (2) is built
      // first and immediately used to decode the sizes for tables 0 and 1.
      for (; (int)r->m_type >= 0; r->m_type--) {
        int tree_next, tree_cur;
        tinfl_huff_table *pTable;
        mz_uint i, j, used_syms, total, sym_index, next_code[17],
            total_syms[16];
        pTable = &r->m_tables[r->m_type];
        MZ_CLEAR_OBJ(total_syms);
        MZ_CLEAR_OBJ(pTable->m_look_up);
        MZ_CLEAR_OBJ(pTable->m_tree);
        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
          total_syms[pTable->m_code_size[i]]++;
        used_syms = 0, total = 0;
        next_code[0] = next_code[1] = 0;
        for (i = 1; i <= 15; ++i) {
          used_syms += total_syms[i];
          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
        }
        // An over- or under-subscribed code is invalid (unless it has only
        // a single symbol).
        if ((65536 != total) && (used_syms > 1)) {
          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
        }
        for (tree_next = -1, sym_index = 0;
             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
          mz_uint rev_code = 0, l, cur_code,
                  code_size = pTable->m_code_size[sym_index];
          if (!code_size)
            continue;
          cur_code = next_code[code_size]++;
          // Huffman codes are stored LSB-first; reverse the canonical code.
          for (l = code_size; l > 0; l--, cur_code >>= 1)
            rev_code = (rev_code << 1) | (cur_code & 1);
          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
              pTable->m_look_up[rev_code] = k;
              rev_code += (1 << code_size);
            }
            continue;
          }
          if (0 ==
              (tree_cur = pTable->m_look_up[rev_code &
                                            (TINFL_FAST_LOOKUP_SIZE - 1)])) {
            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
                (mz_int16)tree_next;
            tree_cur = tree_next;
            tree_next -= 2;
          }
          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
            tree_cur -= ((rev_code >>= 1) & 1);
            if (!pTable->m_tree[-tree_cur - 1]) {
              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
              tree_cur = tree_next;
              tree_next -= 2;
            } else
              tree_cur = pTable->m_tree[-tree_cur - 1];
          }
          tree_cur -= ((rev_code >>= 1) & 1);
          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
        }
        if (r->m_type == 2) {
          // Decode the run-length-encoded literal/length and distance code
          // sizes using the just-built code-length table.
          for (counter = 0;
               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
            mz_uint s;
            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
            if (dist < 16) {
              r->m_len_codes[counter++] = (mz_uint8)dist;
              continue;
            }
            if ((dist == 16) && (!counter)) {
              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
            }
            num_extra = "\02\03\07"[dist - 16];
            TINFL_GET_BITS(18, s, num_extra);
            s += "\03\03\013"[dist - 16];
            TINFL_MEMSET(r->m_len_codes + counter,
                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
            counter += s;
          }
          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
          }
          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
                       r->m_table_sizes[0]);
          TINFL_MEMCPY(r->m_tables[1].m_code_size,
                       r->m_len_codes + r->m_table_sizes[0],
                       r->m_table_sizes[1]);
        }
      }
      // Main literal/length + distance decode loop for this block.
      for (;;) {
        mz_uint8 *pSrc;
        for (;;) {
          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
              ((pOut_buf_end - pOut_buf_cur) < 2)) {
            // Slow path near the buffer ends: one symbol at a time with
            // full bounds checks.
            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
            if (counter >= 256)
              break;
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ = (mz_uint8)counter;
          } else {
            // Fast path: enough room to decode up to two literals without
            // per-byte bounds checks.
            int sym2;
            mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
            if (num_bits < 30) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 4;
              num_bits += 32;
            }
#else
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf &
                                    (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            counter = sym2;
            bit_buf >>= code_len;
            num_bits -= code_len;
            if (counter & 256)
              break;
#if !TINFL_USE_64BIT_BITBUF
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf &
                                    (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            bit_buf >>= code_len;
            num_bits -= code_len;
            pOut_buf_cur[0] = (mz_uint8)counter;
            if (sym2 & 256) {
              // Second symbol was a length code: emit the first literal and
              // fall through to the match-copy code below.
              pOut_buf_cur++;
              counter = sym2;
              break;
            }
            pOut_buf_cur[1] = (mz_uint8)sym2;
            pOut_buf_cur += 2;
          }
        }
        // Symbol 256 is end-of-block.
        if ((counter &= 511) == 256)
          break;
        // Decode match length and distance, each with optional extra bits.
        num_extra = s_length_extra[counter - 257];
        counter = s_length_base[counter - 257];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(25, extra_bits, num_extra);
          counter += extra_bits;
        }
        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
        num_extra = s_dist_extra[dist];
        dist = s_dist_base[dist];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(27, extra_bits, num_extra);
          dist += extra_bits;
        }
        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
        if ((dist > dist_from_out_buf_start) &&
            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
        }
        pSrc = pOut_buf_start +
               ((dist_from_out_buf_start - dist) & out_buf_size_mask);
        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
          // Match runs past the end of the output buffer: copy one byte at
          // a time, suspending whenever the caller's buffer fills up.
          while (counter--) {
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ =
                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
                               out_buf_size_mask];
          }
          continue;
        }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
        else if ((counter >= 9) && (counter <= dist)) {
          // Non-overlapping match: copy 8 bytes per iteration.
          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
          do {
            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
            pOut_buf_cur += 8;
          } while ((pSrc += 8) < pSrc_end);
          if ((counter &= 7) < 3) {
            if (counter) {
              pOut_buf_cur[0] = pSrc[0];
              if (counter > 1)
                pOut_buf_cur[1] = pSrc[1];
              pOut_buf_cur += counter;
            }
            continue;
          }
        }
#endif
        // Generic (possibly overlapping) match copy, 3 bytes per iteration.
        do {
          pOut_buf_cur[0] = pSrc[0];
          pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur[2] = pSrc[2];
          pOut_buf_cur += 3;
          pSrc += 3;
        } while ((int)(counter -= 3) > 2);
        if ((int)counter > 0) {
          pOut_buf_cur[0] = pSrc[0];
          if ((int)counter > 1)
            pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur += counter;
        }
      }
    }
  } while (!(r->m_final & 1));
  // Read the trailing adler-32 stored in the zlib footer, if present.
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    TINFL_SKIP_BITS(32, num_bits & 7);
    for (counter = 0; counter < 4; ++counter) {
      mz_uint s;
      if (num_bits)
        TINFL_GET_BITS(41, s, 8);
      else
        TINFL_GET_BYTE(42, s);
      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
    }
  }
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH

common_exit:
  // Persist coroutine state for the next call, and report how much input
  // was consumed and output produced during this call.
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    // Incrementally fold the bytes produced this call into the running
    // adler-32 (8-way unrolled; modulo deferred per 5552-byte block, the
    // largest count for which the sums cannot overflow 32 bits).
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i)
        s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}

// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ?
(mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. 
static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. 
// A (frequency, symbol-index) pair used while building Huffman code lengths.
typedef struct {
  mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;

// Counting-sort implementation: up to two 8-bit passes ping-ponging between
// the two caller-supplied scratch arrays; the high-byte pass is skipped when
// every key fits in 8 bits.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
  tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
  MZ_CLEAR_OBJ(hist);
  // Histogram the low and high bytes of every key in one sweep.
  for (i = 0; i < num_syms; i++) {
    mz_uint freq = pSyms0[i].m_key;
    hist[freq & 0xFF]++;
    hist[256 + ((freq >> 8) & 0xFF)]++;
  }
  // If all high bytes are zero, one pass suffices.
  while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
    total_passes--;
  for (pass_shift = 0, pass = 0; pass < total_passes;
       pass++, pass_shift += 8) {
    const mz_uint32 *pHist = &hist[pass << 8];
    mz_uint offsets[256], cur_ofs = 0;
    // Prefix-sum the histogram into starting offsets for each bucket.
    for (i = 0; i < 256; i++) {
      offsets[i] = cur_ofs;
      cur_ofs += pHist[i];
    }
    // Scatter into the other array, then swap roles for the next pass.
    for (i = 0; i < num_syms; i++)
      pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
          pCur_syms[i];
    {
      tdefl_sym_freq *t = pCur_syms;
      pCur_syms = pNew_syms;
      pNew_syms = t;
    }
  }
  // pCur_syms points at whichever scratch array holds the final order.
  return pCur_syms;
}

// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// In-place minimum-redundancy (Huffman) code-length computation: on entry
// A[0..n-1].m_key holds symbol frequencies sorted ascending; on exit each
// m_key holds that symbol's code length. Uses the Moffat/Katajainen
// three-phase in-place algorithm (see attribution above).
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  // Trivial cases: no symbols, or one symbol which gets a 1-bit code.
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  // Phase 1: build the Huffman tree in place; m_key entries become parent
  // indices for internal nodes as they are consumed.
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    // Select first item for the pairing: smaller of next internal node
    // (at root) and next leaf.
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = A[leaf++].m_key;
    // Select second item the same way and accumulate the pair's weight.
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  // Phase 2: convert parent indices into node depths (root depth 0).
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  // Phase 3: convert internal-node depths into leaf code lengths.
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}

// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };

// Clamps a histogram of code lengths (pNum_codes[1..]) so no code exceeds
// max_code_size while keeping the Kraft sum exactly 2^max_code_size, so a
// valid prefix code still exists.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1)
    return;
  // Fold all over-long code counts down into the max_code_size bucket.
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  // Kraft sum, scaled by 2^max_code_size.
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  // While oversubscribed, promote one shorter code to length+1 (splitting it).
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}

// Computes Huffman code sizes and canonical (bit-reversed) codes for table
// table_num from the symbol frequencies in d->m_huff_count, limited to
// code_size_limit bits. For static_table, the code sizes are taken as preset.
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Caller already filled m_huff_code_sizes; just histogram the lengths.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Collect used symbols, sort by frequency, derive optimal lengths.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++)
      num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Longest codes go to the least frequent symbols (pSyms is ascending).
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Canonical code assignment, then bit-reverse each code because the bit
  // buffer emits LSB-first.
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0)
      continue;
    code = next_code[code_size]++;
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}

// Appends the low l bits of b to the compressor's bit buffer, flushing whole
// bytes to the output buffer as they become available.
#define TDEFL_PUT_BITS(b, l)                               \
  do {                                                     \
    mz_uint bits = b;                                      \
    mz_uint len = l;                                       \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                 \
    d->m_bit_buffer |= (bits << d->m_bits_in);             \
    d->m_bits_in += len;                                   \
    while (d->m_bits_in >= 8) {                            \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)         \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
      d->m_bit_buffer >>= 8;                               \
      d->m_bits_in -= 8;                                   \
    }                                                      \
  }                                                        \
  MZ_MACRO_END

// Flush a pending run of repeated code sizes: short runs (<3) are emitted
// literally, longer runs use DEFLATE code 16 (repeat previous 3..6 times).
// Relies on locals of tdefl_start_dynamic_block being in scope.
#define TDEFL_RLE_PREV_CODE_SIZE()                                        \
  {                                                                       \
    if (rle_repeat_count) {                                               \
      if (rle_repeat_count < 3) {                                         \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                 \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);       \
        while (rle_repeat_count--)                                        \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;    \
      } else {                                                            \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 16;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_repeat_count - 3);                             \
      }                                                                   \
      rle_repeat_count = 0;                                               \
    }                                                                     \
  }

// Flush a pending run of zero code sizes: DEFLATE code 17 covers 3..10
// zeros, code 18 covers 11..138; runs shorter than 3 are emitted literally.
#define TDEFL_RLE_ZERO_CODE_SIZE()                                        \
  {                                                                       \
    if (rle_z_count) {                                                    \
      if (rle_z_count < 3) {                                              \
        d->m_huff_count[2][0] =                                           \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);             \
        while (rle_z_count--)                                             \
          packed_code_sizes[num_packed_code_sizes++] = 0;                 \
      } else if (rle_z_count <= 10) {                                     \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 17;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_z_count - 3);                                  \
      } else {                                                            \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 18;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_z_count - 11);                                 \
      }                                                                   \
      rle_z_count = 0;                                                    \
    }                                                                     \
  }

// Transmission order of the code-length-code lengths (RFC 1951 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16,
17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

// Emits a dynamic-Huffman DEFLATE block header: optimizes the literal/length
// and distance tables, RLE-packs their code lengths, builds the code-length
// table over that packed stream, and writes the full header bits.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) must always have a code.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing unused codes (DEFLATE minimums: 257 lit, 1 dist).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1])
      break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1])
      break;
  // Concatenate both length arrays, then RLE-encode them into
  // packed_code_sizes while histogramming into table 2.
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {  // max run length for code 18
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {  // max run length for code 16
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  // Code-length-code table is limited to 7-bit codes.
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  // BTYPE = 2 (dynamic Huffman).
  TDEFL_PUT_BITS(2, 2);
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  // HCLEN: number of code-length-code lengths to transmit (min 4).
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the packed code sizes; codes 16/17/18 carry 2/3/7 extra bits.
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}

// Emits a static-Huffman block header using the fixed code lengths from
// RFC 1951 3.2.6 (literals: 8/9/7/8 by range, distances: all 5).
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint i;
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  for (i = 0; i <= 143; ++i)
    *p++ = 8;
  for (; i <= 255; ++i)
    *p++ = 9;
  for (; i <= 279; ++i)
    *p++ = 7;
  for (; i <= 287; ++i)
    *p++ = 8;
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  // BTYPE = 1 (static Huffman).
  TDEFL_PUT_BITS(1, 2);
}

// mz_bitmasks[n] has the low n bits set.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
    MINIZ_HAS_64BIT_REGISTERS
// Fast path: Huffman-encodes the buffered LZ codes using a 64-bit bit buffer
// and unaligned stores (little-endian only). Output-space checks happen once
// per flag group rather than per code.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;

// Accumulate bits without flushing; caller flushes 8 bytes at a time below.
#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }

  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1)
      // Reload the next flag byte; the 0x100 sentinel marks when 8 flags
      // have been consumed.
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: 1 length byte + 2 distance bytes in the LZ code buffer.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal; opportunistically emit up to two more literals from this
      // flag group while we know the bit buffer has room.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end)
      return MZ_FALSE;
    // Flush the completed whole bytes with one unaligned 64-bit store.
    // NOTE(review): relies on MINIZ_USE_UNALIGNED_LOADS_AND_STORES being safe
    // on the target (strict-aliasing/alignment caveat inherited from miniz).
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  // Hand the leftover partial bits back to the slow-path bit buffer.
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  // End-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable path: same LZ-code stream encoding using only byte reads and the
// byte-at-a-time TDEFL_PUT_BITS macro.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
        // MINIZ_HAS_64BIT_REGISTERS

// Writes one full DEFLATE block: header (static or dynamic) then the
// Huffman-coded LZ codes. Returns MZ_FALSE on output-buffer overflow.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block)
    tdefl_start_static_block(d);
  else
    tdefl_start_dynamic_block(d);
  return tdefl_compress_lz_codes(d);
}

// Flushes the accumulated LZ codes as one DEFLATE block (compressed or,
// if compression expanded the data, stored/raw), then moves finished bytes
// to the user's output. Returns the number of bytes still pending flush,
// or a negative status on put-callback failure.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      // Raw (stored) blocks are only possible while the block's source bytes
      // are still present in the dictionary window.
      use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
                      (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <=
                          d->m_dict_size;
  // Compress straight into the user's buffer when it is large enough,
  // otherwise into the internal staging buffer.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Finalize the (possibly partial) last flag byte of the LZ code buffer.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  // zlib stream header (CMF/FLG = 0x78 0x01) before the first block.
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  // BFINAL bit.
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
  // Snapshot output state so we can roll back if compression expands.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // BTYPE = 0 (stored), then align to a byte boundary.
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    // LEN and NLEN (= ~LEN), via the XOR trick.
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    // Copy the original bytes back out of the dictionary window.
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    // Retry with static codes, which have a bounded header size.
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Byte-align, then append the zlib Adler-32 trailer if requested.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Sync flush: an empty stored block to byte-align the stream.
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset per-block state for the next block.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Deliver the produced bytes: via callback, directly into the user's
  // buffer, or stage the overflow for later tdefl_flush_output_buffer calls.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Hash-chain match finder (unaligned 16-bit compare variant): searches the
// dictionary for the longest match at lookahead_pos, improving on *pMatch_len.
static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d,
                                            mz_uint lookahead_pos,
                                            mz_uint max_dist, mz_uint
                                            max_match_len, mz_uint *pMatch_dist,
                                            mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01 = the two bytes a candidate must match at the current best length to
  // possibly beat it; s01 = the first two bytes at the search position.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len)
    return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0)
        return;
// Follow one hash-chain link; bail out of the whole search when the chain
// ends or the candidate is too far away; break when the quick 2-byte
// tail check passes.
#define TDEFL_PROBE                                                          \
  next_probe_pos = d->m_next[probe_pos];                                     \
  if ((!next_probe_pos) ||                                                   \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))     \
    return;                                                                  \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                      \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) ==    \
      c01)                                                                   \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist)
      break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01)
      continue;
    // Compare 16 bits at a time, up to 32 words (= TDEFL_MAX_MATCH_LEN-ish).
    p = s;
    probe_len = 32;
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full compare without mismatch: maximal match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Portable byte-at-a-time variant of the hash-chain match finder.
static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d,
                                            mz_uint lookahead_pos,
                                            mz_uint max_dist,
                                            mz_uint max_match_len,
                                            mz_uint *pMatch_dist,
                                            mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  mz_uint8 c0 = d->m_dict[pos + match_len],
           c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len)
    return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0)
        return;
// Same chain-walking probe as above, with a 2-byte tail check done as two
// single-byte compares.
#define TDEFL_PROBE                                                      \
  next_probe_pos = d->m_next[probe_pos];                                 \
  if ((!next_probe_pos) ||                                               \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return;                                                              \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                  \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                        \
      (d->m_dict[probe_pos + match_len - 1] == c1))                      \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist)
      break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++)
        break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len)
        return;
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  // Compressor state is mirrored into locals for the duration of the loop
  // and written back before every tdefl_flush_block call and on exit.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    // Bulk-copy input into the circular dictionary; the first
    // TDEFL_MAX_MATCH_LEN-1 bytes are mirrored past the end so matches can
    // be compared without wrapping.
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      // Hash the 3-byte trigram at the current position (single-probe,
      // level-1 hash table).
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        // Trigram hit: extend the match 16 bits at a time.
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          // Too short (or a barely-minimal match that's too far away to be
          // worth the distance cost): emit a literal instead.
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          // Record the match: length byte + 16-bit distance, flag bit set.
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        // Trigram miss: literal.
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      // LZ code buffer nearly full: write back state and flush a block.
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    // Fewer than 4 bytes left in the lookahead: emit them as literals.
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  // Write the mirrored state back to the compressor.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN

// Appends one literal byte to the LZ code buffer and updates its frequency.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  d->m_huff_count[0][lit]++;
}

// Appends one match (length, distance) to the LZ code buffer and updates the
// length- and distance-symbol frequencies.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
                     s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}

// Normal-speed compressor: hash-chain match finding with optional lazy
// (one-step-deferred) parsing. Processes d->m_pSrc/m_src_buf_left, recording
// literals/matches and flushing blocks as the LZ code buffer fills.
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      // Bulk path: rolling 2-byte hash seeded from the bytes already in the
      // dictionary, extended byte-by-byte as input streams in.
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        // Mirror the window's first bytes past the end for wrap-free compares.
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      // Startup path: not enough history yet for the rolling hash.
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
      break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      // RLE-only mode: only consider a distance-1 run of the previous byte.
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c)
            break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    // Reject matches that are likely to cost more than literals.
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      // Lazy parsing: a match was deferred from the previous position.
      if (cur_match_len > d->m_saved_match_len) {
        // Current match is better: emit the deferred literal instead.
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        // Deferred match wins.
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      // Greedy: take the match immediately.
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      // Lazy: defer this match one position in case a better one follows.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}

// Copies as much staged (overflow) output as fits into the caller's buffer
// and updates the in/out size reports. Returns DONE only when finished and
// nothing remains staged.
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size) {
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  }
  if (d->m_pOut_buf_size) {
    size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
                      d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  return (d->m_finished && !d->m_output_flush_remaining) ?
                                                         TDEFL_STATUS_DONE
                                                         : TDEFL_STATUS_OKAY;
}

// Main streaming entry point: validates parameters, runs the fast or normal
// compressor over the supplied input, maintains the Adler-32, and flushes
// the final block when asked to finish. *pIn_buf_size / *pOut_buf_size are
// updated to the number of bytes actually consumed / produced.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // Exactly one output mechanism must be in use (callback XOR buffer), the
  // previous call must have succeeded, and a FINISH once requested sticks.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  // Drain any previously staged output before compressing more.
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // Fast path is only valid for 1-probe greedy parsing with no filtering.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d))
      return d->m_prev_return_status;
  } else
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d))
      return d->m_prev_return_status;
  }
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  // All input consumed and a flush requested: emit the final/sync block.
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0)
      return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // Full flush also resets the dictionary so decompression can restart.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

// Convenience wrapper for callers using the put-buffer callback: compresses
// one whole in-memory buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

// Initializes a compressor. flags packs the probe count (low 12 bits) plus
// the TDEFL_* option bits; m_max_probes[0]/[1] derive the per-length search
// effort from it.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  // Hash table may be left dirty for speed when deterministic output across
  // reuses isn't required.
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
    MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // First byte of the LZ code buffer is reserved for the first flag byte.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

// NOTE(review): this definition continues past the visible chunk; left as-is.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
} mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; } mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE; pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE; succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY); succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE); MZ_FREE(pComp); return succeeded; } typedef struct { size_t m_size, m_capacity; mz_uint8 *m_pBuf; mz_bool m_expandable; } tdefl_output_buffer; static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser) { tdefl_output_buffer *p = (tdefl_output_buffer *)pUser; size_t new_size = p->m_size + len; if (new_size > p->m_capacity) { size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE; do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity); pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE; p->m_pBuf = pNew_buf; p->m_capacity = new_capacity; } memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size; return MZ_TRUE; } void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_len) return MZ_FALSE; else *pOut_len = 0; out_buf.m_expandable = MZ_TRUE; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL; *pOut_len = out_buf.m_size; return out_buf.m_pBuf; } size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_buf) 
return 0; out_buf.m_pBuf = (mz_uint8 *)pOut_buf; out_buf.m_capacity = out_buf_len; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0; return out_buf.m_size; } #ifndef MINIZ_NO_ZLIB_APIS static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; // level may actually range from [0,10] (10 is a "hidden" max level, where we // want a bit more compression and it's fine if throughput to fall off a cliff // on some files). mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to 'int', // possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. 
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || defined(__MINGW64__) 
// fopen_s wrapper returning the classic FILE* interface.
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
// freopen_s wrapper returning NULL on failure.
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
// Per-platform stdio abstraction: each branch maps the MZ_F* macros onto the
// platform's 64-bit-capable file API.
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif  // #ifdef _MSC_VER
#endif  // #ifdef MINIZ_NO_STDIO

// ASCII-only lowercasing used for case-insensitive filename comparisons.
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))

// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};

// Minimal growable array of fixed-size elements.
typedef struct {
  void *m_p;
  size_t m_size, m_capacity;
  mz_uint m_element_size;
} mz_zip_array;

// Private per-archive state: raw central directory bytes, per-entry offsets
// into it, optional sorted index, and the backing file or memory block.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;
  mz_zip_array m_central_dir_offsets;
  mz_zip_array m_sorted_central_dir_offsets;
  MZ_FILE *m_pFile;
  void *m_pMem;
  size_t m_mem_size;
  size_t m_mem_capacity;
};

#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]

// Frees the array's storage via the archive's allocator and zeroes it.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(mz_zip_array));
}

// Grows capacity to at least min_new_capacity; when `growing` is set the
// capacity is doubled geometrically instead of sized exactly.
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity) new_capacity *= 2;
  }
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                         pArray->m_element_size, new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  if (new_capacity > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}

// Sets m_size (growing storage if required); new elements are uninitialized.
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
                                                  mz_zip_array *pArray,
                                                  size_t new_size,
                                                  mz_uint growing) {
  if (new_size > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
      return MZ_FALSE;
  }
  pArray->m_size = new_size;
  return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}

// Appends n elements (copied from pElements) to the end of the array.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t orig_size = pArray->m_size;
  if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
    return MZ_FALSE;
  memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
         pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Converts MS-DOS packed time/date fields (as stored in ZIP headers) to
// time_t. DOS seconds have 2-second granularity, year is relative to 1980.
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1;
  tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
  tm.tm_mon = ((dos_date >> 5) & 15) - 1;
  tm.tm_mday = dos_date & 31;
  tm.tm_hour = (dos_time >> 11) & 31;
  tm.tm_min = (dos_time >> 5) & 63;
  tm.tm_sec = (dos_time << 1) & 62;
  return mktime(&tm);
}

// Converts a time_t to MS-DOS packed time/date; on conversion failure (MSVC
// localtime_s error) both outputs are zeroed.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
#endif
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif

#ifndef MINIZ_NO_STDIO
// Stats pFilename and converts its mtime to DOS time/date. Returns MZ_FALSE
// if the stat fails (or unconditionally zeroes the outputs when time support
// is compiled out).
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif  // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Sets a file's access/modification times via utime().
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf t;
  t.actime = access_time;
  t.modtime = modified_time;
  return !utime(pFilename, &t);
}
#endif  // #ifndef MINIZ_NO_TIME
#endif  // #ifndef MINIZ_NO_STDIO

// Common reader setup: installs default allocators if unset, switches the
// archive into READING mode, and allocates/zeroes the internal state.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// Case-insensitive "less than" comparison of the filenames of two central
// directory entries (by index); used as the ordering for the heap sort below.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  // Equal prefixes: the shorter name sorts first.
  return (pL == pE) ? (l_len < r_len) : (l < r);
}

#define MZ_SWAP_UINT32(a, b) \
  do {                       \
    mz_uint32 t = a;         \
    a = b;                   \
    b = t;                   \
  }                          \
  MZ_MACRO_END

// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void
mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  int start = (size - 2) >> 1, end;
  // Phase 1: build a max-heap over the index array (sift-down from the last
  // internal node).
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size) break;
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }
  // Phase 2: repeatedly move the max to the end and re-heapify the prefix.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end) break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}

// Locates, validates, and loads the entire ZIP central directory into memory,
// builds the per-entry offset table (and optionally a sorted name index), and
// sanity-checks every record. Zip64 archives are rejected.
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
        break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up if we've reached the start of the file, or scanned further back
    // than the max possible EOCD position (record + 64KB comment).
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Overlap windows by 3 bytes so a signature spanning two reads is found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;
  // Multi-disk archives are not supported (only the degenerate 1-disk form).
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
    return MZ_FALSE;
  pZip->m_central_directory_file_ofs = cdir_ofs;
  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;
    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }
    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;
    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // 0xFFFFFFFF sizes are zip64 markers; stored (method 0) entries must
      // have equal sizes.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size =
               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }
  if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
  return MZ_TRUE;
}

// Initializes a reader over a user-supplied m_pRead callback of `size` bytes.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

// m_pRead implementation for memory-backed archives: bounded memcpy from the
// archive block.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
                 ? 0
                 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
  return s;
}

// Initializes a reader over an in-memory archive image. The memory is NOT
// copied; the caller must keep pMem alive until mz_zip_reader_end().
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// m_pRead implementation for file-backed archives: seeks only when the file
// position differs from the requested offset, then freads.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Opens pFilename and initializes a reader over it. The file handle is owned
// by the archive state and closed by mz_zip_reader_end().
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile) return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif  // #ifndef MINIZ_NO_STDIO

// Number of entries in the archive (0 for a NULL archive).
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  return pZip ? pZip->m_total_files : 0;
}

// Returns a pointer to the raw central directory header record for
// file_index, or NULL if the archive/index is invalid.
static MZ_FORCEINLINE const mz_uint8 *
mz_zip_reader_get_cdh(mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
}

// True when the entry's general-purpose bit 0 (traditional encryption) is set.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  mz_uint m_bit_flag;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;
  m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  return (m_bit_flag & 1);
}

// Heuristically determines whether an entry is a directory: trailing '/' in
// the name, or the DOS directory bit in the external attributes.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;
  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }
  // Bugfix: This code was also checking if the internal attribute was non-zero,
  // which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in the
  // low 16-bits, so check for the DOS directory flag and ignore the source OS
  // ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0) return MZ_TRUE;
  return MZ_FALSE;
}

// Unpacks the central directory header for file_index into *pStat (version,
// flags, method, time, CRC, sizes, attrs, local-header offset, name, comment).
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat)) return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible.
  // Copy the filename (truncated to the stat buffer's capacity) and NUL-terminate.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';

  // The comment follows the filename and the extra field in the central dir record.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';

  return MZ_TRUE;
}

// Copies the archive member's filename into pFilename (truncating to
// filename_buf_size, always NUL-terminating when the buffer is non-empty).
// Returns the full filename length + 1 (i.e. the buffer size needed to hold
// it), or 0 if file_index is invalid.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size)
      pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}

// Compares len bytes of pA and pB; case-sensitively when
// MZ_ZIP_FLAG_CASE_SENSITIVE is set, otherwise via MZ_TOLOWER on each byte.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
    return 0 == memcmp(pA, pB, len);
  for (i = 0; i < len; ++i)
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
      return MZ_FALSE;
  return MZ_TRUE;
}

// Case-insensitive strcmp-style comparison between the filename stored in
// central dir record l_index and the caller's string pR (length r_len).
// Returns <0 / 0 / >0 ordering, used by the binary search below.
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                    pCentral_dir_array, mz_uint8,
                    MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                         l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // The filename immediately follows the fixed-size central dir header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Equal prefixes order by length; otherwise by the first differing byte.
  return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}

// Binary search over the pre-sorted central directory index
// (m_sorted_central_dir_offsets). Returns the file index or -1 if not found.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp = mz_zip_reader_filename_compare(pCentral_dir,
                                              pCentral_dir_offsets, file_index,
                                              pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}

// Locates a file in the archive by name (and optionally by comment).
// Uses the sorted-index binary search when possible (default flags, no
// comment filter); otherwise falls back to a linear scan honoring
// MZ_ZIP_FLAG_IGNORE_PATH / MZ_ZIP_FLAG_CASE_SENSITIVE.
// Returns the file index, or -1 on error / not found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: the sorted index is only valid for the default comparison.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) ==
       0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF)
    return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF)
    return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len)
      continue;
    if (comment_len) {
      // The comment sits after the filename and the extra field.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip any directory prefix: scan back to the last '/', '\\' or ':'.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}

// Extracts an archive member into a caller-supplied memory buffer, optionally
// using a caller-supplied read buffer (no internal allocation in that case).
// Supports only stored and deflate entries; verifies the CRC32 of the
// decompressed data. Returns MZ_TRUE on success.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
                                       read_buf_size, read_buf_ofs = 0,
                                       read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                              sizeof(mz_uint32) - 1) /
                             sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;

  if ((buf_size) && (!pBuf))
    return MZ_FALSE;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;

  // Empty file, or a directory (but not always a directory - I've seen odd
  // zips with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;

  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size)
    return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  // Skip the local header's variable-length filename and extra field to reach
  // the compressed payload.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    // For raw (compressed) reads there is nothing to CRC; otherwise the stored
    // data must match the central directory's CRC32.
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }

  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);

  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size)
      return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
// The (0, expr) comma form suppresses MSVC's constant-conditional warning.
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }

  // Streamed inflate loop: refill the read buffer as needed and decompress
  // directly into the caller's output buffer (non-wrapping mode).
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);

  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }

  // Only free the read buffer if we allocated it ourselves.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);

  return status == TINFL_STATUS_DONE;
}

// Name-based wrapper around mz_zip_reader_extract_to_mem_no_alloc().
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, pUser_read_buf,
                                               user_read_buf_size);
}

// Extracts an entry (by index) into a caller-supplied buffer, letting the
// reader allocate its own temporary read buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, NULL, 0);
}

// Extracts an entry (by name) into a caller-supplied buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}

// Extracts an entry (by index) into a freshly allocated heap block, returning
// the block (caller frees via the archive's free callback) and its size via
// *pSize. Returns NULL on failure.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;

  if (pSize)
    *pSize = 0;
  if (!p)
    return NULL;

  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);

  alloc_size =
      (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
// Reject allocations that can't be represented in a 32-bit size_t.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                     (size_t)alloc_size)))
    return NULL;

  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf,
                                    (size_t)alloc_size, flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }

  if (pSize)
    *pSize = (size_t)alloc_size;
  return pBuf;
}

// Name-based wrapper around mz_zip_reader_extract_to_heap().
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize)
      *pSize = 0;
    // NOTE(review): MZ_FALSE is 0, so this is effectively NULL for the
    // void* return type.
    return MZ_FALSE;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}

// Extracts an entry (by index), streaming the decompressed data to pCallback
// in chunks (used for extract-to-file). Verifies size and CRC32 on completion.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
                           out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                              sizeof(mz_uint32) - 1) /
                             sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;

  // Empty file, or a directory (but not always a directory - I've seen odd
  // zips with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;

  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
    if (pZip->m_pState->m_pMem) {
// In-memory archive: hand the whole payload to the callback in one shot.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // File-backed archive: stream the stored payload through the read
      // buffer, CRC'ing as we go (unless the caller wants the raw bytes).
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    // Deflated entry: inflate through a TINFL_LZ_DICT_SIZE circular window
    // (pWrite_buf) since the callback consumes output incrementally.
    tinfl_decompressor inflator;
    tinfl_init(&inflator);

    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }

        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs,
            &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur,
            &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;

        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Guard against output exceeding the size claimed in the headers.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }

  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }

  if (!pZip->m_pState->m_pMem)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);

  return status == TINFL_STATUS_DONE;
}

// Name-based wrapper around mz_zip_reader_extract_to_callback().
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback,
                                           pOpaque, flags);
}

#ifndef MINIZ_NO_STDIO
// mz_file_write_func adapter that appends to a stdio FILE* (ofs is ignored;
// extraction writes sequentially).
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

// Extracts an entry (by index) to a disk file, preserving the entry's
// modification time when MINIZ_NO_TIME is not defined.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename, "wb");
  if (!pFile)
    return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // fclose flushes buffered data - its failure means the extract failed too.
  if (MZ_FCLOSE(pFile) == EOF)
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO

// Tears down a reader: frees the central directory arrays and internal state,
// closes any stdio file, and returns the archive to MZ_ZIP_MODE_INVALID.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;

  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);

#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif // #ifndef MINIZ_NO_STDIO

    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;

  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Extracts an entry (by archive name) to a disk file.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int file_index =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif

// ------------------- .ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Little-endian field writers used by the header builders below.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
  p[2] = (mz_uint8)(v >> 16);
  p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))

// Initializes a writer over a user-supplied m_pWrite callback. existing_size
// is the number of bytes already present before the first new entry.
// Installs default allocator callbacks for any the caller left NULL.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;

  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }

  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;

  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;

  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// m_pWrite implementation backing an in-heap archive: grows the memory block
// (doubling capacity) as needed and copies the data at file_ofs.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}

// Initializes a writer that builds the archive in a growable heap block.
// size_to_reserve_at_beginning bytes are left untouched at the front;
// initial_allocation_size pre-sizes the block to avoid early reallocs.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// m_pWrite implementation backing a stdio file: seeks only when the target
// offset differs from the current position, then writes.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Initializes a writer over a new disk file, optionally pre-writing
// size_to_reserve_at_beginning zero bytes at the start of the file.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Converts an archive opened for reading into one that can append new
// entries in place. pFilename is needed only when the reader is stdio-backed
// (the file is reopened writable).
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. 
  // New entries overwrite the old central directory; it is rewritten when the
  // archive is finalized.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;

  return MZ_TRUE;
}

// Convenience wrapper: adds a memory buffer with no comment and default
// extra/crc parameters.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL,
                                  0, level_and_flags, 0, 0);
}

// State threaded through the tdefl put-buf callback while compressing an
// entry: tracks the current archive write offset and total compressed size.
typedef struct {
  mz_zip_archive *m_pZip;
  mz_uint64 m_cur_archive_file_ofs;
  mz_uint64 m_comp_size;
} mz_zip_writer_add_state;

// tdefl output callback: appends the compressed bytes to the archive via the
// writer's m_pWrite and accumulates the compressed size.
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf,
                                    len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

// Fills pDst with a ZIP local directory header (MZ_ZIP_LOCAL_DIR_HEADER_SIZE
// bytes, little-endian fields).
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // Version 2.0 is needed for deflate; stored entries need only 0.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}

// Fills pDst with a ZIP central directory header
// (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes, little-endian fields).
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // Version 2.0 is needed for deflate; stored entries need only 0.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}

// Appends a fully-populated central directory record (header + filename +
// extra + comment) and its offset to the writer's in-memory central dir
// arrays, rolling the arrays back on failure.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];

  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;

  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir,
                               central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }

  return MZ_TRUE;
}

static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  // Basic ZIP archive filename validity checks: Valid filenames cannot start
  // with a forward slash, cannot contain a drive letter, and cannot use
  // DOS-style backward slashes.
  if (*pArchive_name == '/')
    return MZ_FALSE;
  while (*pArchive_name) {
    if ((*pArchive_name == '\\') || (*pArchive_name == ':'))
      return MZ_FALSE;
    pArchive_name++;
  }
  return MZ_TRUE;
}

// Returns how many padding bytes must precede the next local header so that
// it lands on the configured m_file_offset_alignment boundary (0 if the
// feature is disabled).
static mz_uint
mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip) {
  mz_uint32 n;
  if (!pZip->m_file_offset_alignment)
    return 0;
  n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - n) &
         (pZip->m_file_offset_alignment - 1);
}

// Writes n zero bytes at cur_file_ofs via the writer callback, in 4KB chunks.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char buf[4096];
  memset(buf, 0, MZ_MIN(sizeof(buf), n));
  while (n) {
    mz_uint32 s = MZ_MIN(sizeof(buf), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
      return MZ_FALSE;
    cur_file_ofs += s;
    n -= s;
  }
  return MZ_TRUE;
}

// Adds a memory buffer to the archive as a new entry. level_and_flags is the
// compression level (0-10) optionally OR'd with MZ_ZIP_FLAG_* values; with
// MZ_ZIP_FLAG_COMPRESSED_DATA the buffer is assumed to already be deflated
// and uncomp_size/uncomp_crc32 describe the original data.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;

  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  // Level 0, or pre-compressed input, is written without running tdefl.
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) ||
      ((buf_size) && (!pBuf)) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) ||
      (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;

  pState = pZip->m_pState;

  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;

#ifndef MINIZ_NO_TIME
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif // #ifndef MINIZ_NO_TIME

  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size))
      return MZ_FALSE;
  }

  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
if ((!mz_zip_array_ensure_room(pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, 
buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return 
MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, 
pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size, uncomp_remaining ? 
TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if 
(NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); 
return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = 
cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if 
(!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. 
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. if (!mz_zip_reader_init_file(&zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file(&zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. 
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- } bool IsBigEndian(void) { union { unsigned int i; char c[4]; } bint = {0x01020304}; return bint.c[0] == 1; } void swap2(unsigned short *val) { unsigned short tmp = *val; unsigned char *dst = (unsigned char *)val; unsigned char *src = (unsigned char *)&tmp; dst[0] = src[1]; dst[1] = src[0]; } void swap4(unsigned int *val) { unsigned int tmp = *val; unsigned char *dst = (unsigned char *)val; unsigned char *src = (unsigned char *)&tmp; dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; } void swap8(unsigned long long *val) { unsigned long long tmp = (*val); unsigned char *dst = (unsigned char *)val; unsigned char *src = (unsigned char *)&tmp; dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. union FP32 { unsigned int u; float f; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; union FP16 { unsigned short u; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; FP32 half_to_float(FP16 h) { static const FP32 magic = {113 << 23}; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fff) << 13; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? 
{ o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000) << 16; // sign bit return o; } FP16 float_to_half_full(FP32 f) { FP16 o = {0}; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf } else // Normalized number { // Exponent unbias the single, then bias the halfp int newexp = f.s.Exponent - 127 + 15; if (newexp >= 31) // Overflow, return signed infinity o.s.Exponent = 31; else if (newexp <= 0) // Underflow { if ((14 - newexp) <= 24) // Mantissa might be non-zero { unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit o.s.Mantissa = mant >> (14 - newexp); if ((mant >> (13 - newexp)) & 1) // Check for rounding o.u++; // Round, might overflow into exp bit, but this is OK } } else { o.s.Exponent = newexp; o.s.Mantissa = f.s.Mantissa >> 13; if (f.s.Mantissa & 0x1000) // Check for rounding o.u++; // Round, might overflow to inf, this is OK } } o.s.Sign = f.s.Sign; return o; } // NOTE: From OpenEXR code // #define IMF_INCREASING_Y 0 // #define IMF_DECREASING_Y 1 // #define IMF_RAMDOM_Y 2 // // #define IMF_NO_COMPRESSION 0 // #define IMF_RLE_COMPRESSION 1 // #define IMF_ZIPS_COMPRESSION 2 // #define IMF_ZIP_COMPRESSION 3 // #define IMF_PIZ_COMPRESSION 4 // #define IMF_PXR24_COMPRESSION 5 // #define IMF_B44_COMPRESSION 6 // #define IMF_B44A_COMPRESSION 7 const char *ReadString(std::string &s, const char *ptr) { // Read untile NULL(\0). const char *p = ptr; const char *q = ptr; while ((*q) != 0) q++; s = std::string(p, q); return q + 1; // skip '\0' } const char *ReadAttribute(std::string &name, std::string &ty, std::vector<unsigned char> &data, const char *ptr) { if ((*ptr) == 0) { // end of attribute. 
return NULL; } const char *p = ReadString(name, ptr); p = ReadString(ty, p); int dataLen; memcpy(&dataLen, p, sizeof(int)); p += 4; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&dataLen)); } data.resize(dataLen); memcpy(&data.at(0), p, dataLen); p += dataLen; return p; } void WriteAttribute(FILE *fp, const char *name, const char *type, const unsigned char *data, int len) { size_t n = fwrite(name, 1, strlen(name) + 1, fp); assert(n == strlen(name) + 1); n = fwrite(type, 1, strlen(type) + 1, fp); assert(n == strlen(type) + 1); int outLen = len; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&outLen)); } n = fwrite(&outLen, 1, sizeof(int), fp); assert(n == sizeof(int)); n = fwrite(data, 1, len, fp); assert(n == (size_t)len); (void)n; } void WriteAttributeToMemory(std::vector<unsigned char> &out, const char *name, const char *type, const unsigned char *data, int len) { out.insert(out.end(), name, name + strlen(name) + 1); out.insert(out.end(), type, type + strlen(type) + 1); int outLen = len; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&outLen)); } out.insert(out.end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out.insert(out.end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixelType; unsigned char pLinear; int xSampling; int ySampling; } ChannelInfo; void ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; p = ReadString(info.name, p); memcpy(&info.pixelType, p, sizeof(int)); p += 4; info.pLinear = p[0]; // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.xSampling, p, sizeof(int)); // int p += 4; memcpy(&info.ySampling, p, sizeof(int)); // int p += 4; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&info.pixelType)); swap4(reinterpret_cast<unsigned 
int *>(&info.xSampling)); swap4(reinterpret_cast<unsigned int *>(&info.ySampling)); } channels.push_back(info); } } void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixelType = channels[c].pixelType; int xSampling = channels[c].xSampling; int ySampling = channels[c].ySampling; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&pixelType)); swap4(reinterpret_cast<unsigned int *>(&xSampling)); swap4(reinterpret_cast<unsigned int *>(&ySampling)); } memcpy(p, &pixelType, sizeof(int)); p += sizeof(int); (*p) = channels[c].pLinear; p += 4; memcpy(p, &xSampling, sizeof(int)); p += sizeof(int); memcpy(p, &ySampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } void CompressZip(unsigned char *dst, unsigned long long &compressedSize, const unsigned char *src, unsigned long srcSize) { std::vector<unsigned char> tmpBuf(srcSize); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // { char *t1 = (char *)&tmpBuf.at(0); char *t2 = (char *)&tmpBuf.at(0) + (srcSize + 1) / 2; const char *stop = (const char *)src + srcSize; for (;;) { if ((const char *)src < stop) *(t1++) = *(src++); else break; if ((const char *)src < stop) *(t2++) = *(src++); else break; } } // // Predictor. 
// { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + srcSize; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = d; ++t; } } // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(srcSize); int ret = miniz::mz_compress(dst, &outSize, (const unsigned char *)&tmpBuf.at(0), srcSize); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; } void DecompressZip(unsigned char *dst, unsigned long &uncompressedSize, const unsigned char *src, unsigned long srcSize) { std::vector<unsigned char> tmpBuf(uncompressedSize); int ret = miniz::mz_uncompress(&tmpBuf.at(0), &uncompressedSize, src, srcSize); assert(ret == miniz::MZ_OK); (void)ret; // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressedSize; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = d; ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressedSize + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressedSize; for(;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. 
LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = a; short bs = b; short ms = (as + bs) >> 1; short ds = as - bs; l = ms; h = ds; } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = l; short hs = h; int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = ai; short bs = ai - hi; a = as; b = bs; } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
//

// Bias/mask constants for the modulo-arithmetic (full 16-bit) wavelet pair
// wenc16/wdec16 below.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// Modulo-arithmetic wavelet encode: works for full 16-bit values. l receives
// the (biased) average of a and b, h the wrapped difference.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  // Keep the average representable when the difference wrapped negative.
  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = m;
  h = d;
}

// Inverse of wenc16: recovers the original pair (a, b) from the
// average/difference pair (l, h) using the same modulo arithmetic.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = bb;
  a = aa;
}

//
// 2D Wavelet encoding:
//
// In-place hierarchical 2D Haar-style transform over an nx-by-ny grid with
// strides ox/oy. Uses the 14-bit basis (wenc14) when the maximum sample value
// mx fits in 14 bits, otherwise the modulo 16-bit basis (wenc16).
//

void wav2Encode(unsigned short *in, // io: values are transformed in place
                int nx,             // i : x size
                int ox,             // i : x offset
                int ny,             // i : y size
                int oy,             // i : y offset
                unsigned short mx)  // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;  // == 1 << level
  int p2 = 2; // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        // 2x2 neighborhood at the current level's stride.
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//
// Exact inverse of wav2Encode: walks the levels from coarsest to finest and
// applies the matching 14-bit or modulo 16-bit inverse basis in place.
//

void wav2Decode(unsigned short *in, // io: values are transformed in place
                int nx,             // i : x size
                int ox,             // i : x offset
                int ny,             // i : y size
                int oy,             // i : y offset
                unsigned short mx)  // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        // Same 2x2 neighborhood layout as the encoder.
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.

const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;       // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;

// One entry of the primary decoding table. A "short" code (len <= HUF_DECBITS)
// is decoded with a single lookup; for "long" codes, len == 0 and (lit, p)
// describe a secondary list of candidate symbols.
struct HufDec { // short code long code
                //-------------------------------
  int len : 8;  // code length 0
  int lit : 24; // lit         p size
  int *p;       // 0           lits
};

// Packed code word layout: [63:6] canonical code value, [5:0] bit length.
inline long long hufLength(long long code) { return code & 63; }

inline long long hufCode(long long code) { return code >> 6; }

// Append the low nBits of `bits` to the bit accumulator (c, lc), flushing
// complete bytes to *out. MSB-first bit order.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c <<= nBits;
  lc += nBits;

  c |= bits;

  while (lc >= 8) *out++ = (c >> (lc -= 8));
}

// Read nBits from the input stream via the bit accumulator (c, lc);
// inverse of outputBits().
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | *(unsigned char *)(in++);
    lc += 8;
  }

  lc -= nBits;

  return (c >> lc) & ((1 << nBits) - 1);
}

//
// ENCODING TABLE BUILDING & (UN)PACKING
//

//
// Build a "canonical" Huffman code table:
//	- for each (uncompressed) symbol, hcode contains the length
//	  of the corresponding code (in the compressed data)
//	- canonical codes are computed and stored in hcode
//	- the rules for constructing canonical codes are as follows:
//	  * shorter codes (if filled with zeroes to the right)
//	    have a numerically higher value than longer codes
//	  * for codes with the same length, numerical values
//	    increase with numerical symbol values
//	- because the canonical code table can be constructed from
//	  symbol lengths alone, the code table can be transmitted
//	  without sending the actual code values
//	- see http://www.compressconsult.com/huffman/
//

void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];

  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i) n[i] = 0;

  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;

  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //
  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }

  //
  // hcode[i] contains the length, l, of the
  // code for symbol i.  Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = hcode[i];

    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}

//
// Compute Huffman codes (based on frq input) and store them in frq:
//	- code structure is : [63:lsb - 6:msb] | [5-0: bit length];
//	- max code length is 58 bits;
//	- codes outside the range [im-iM] have a null length (unused values);
//	- original frequencies are destroyed;
//	- encoding tables are used by hufEncode() and hufBuildDecTable();
//

// Heap ordering: smallest frequency on top (min-heap via greater-than).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};

void hufBuildEncTable(
    long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
    int *im,        //  o: min frq index
    int *iM)        //  o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //
  int hlink[HUF_ENCSIZE];
  long long *fHeap[HUF_ENCSIZE];

  *im = 0;

  while (!frq[*im]) (*im)++;

  int nf = 0;

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  long long scode[HUF_ENCSIZE];
  memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m; true; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm; true; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode);
  memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}

//
// Pack an encoding table:
//	- only code lengths, not actual codes, are stored
//	- runs of zeroes are compressed as follows:
//
//	  unpacked		packed
//	  --------------------------------
//	  1 zero		0	(6 bits)
//	  2 zeroes		59
//	  3 zeroes		60
//	  4 zeroes		61
//	  5 zeroes		62
//	  n zeroes (6 or more)	63 n-6	(6 + 8 bits)
//

const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;

void hufPackEncTable(
    const long long *hcode, // i : encoding table [HUF_ENCSIZE]
    int im,                 // i : min hcode index
    int iM,                 // i : max hcode index
    char **pcode)           //  o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      // Count and emit a run of zero-length (unused) codes.
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  // Flush any remaining bits, left-aligned in the final byte.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}

//
// Unpack an encoding table packed by hufPackEncTable():
// returns false if the packed data would be read out of bounds
// or a zero run overflows the [im, iM] range.
//

bool hufUnpackEncTable(
    const char **pcode, // io: ptr to packed table (updated)
    int ni,             // i : input size (in bytes)
    int im,             // i : min hcode index
    int iM,             // i : max hcode index
    long long *hcode)   //  o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    if (p - *pcode > ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p); // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
                                      //     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//	- short codes (<= HUF_DECBITS) are resolved with a single table access;
//	- long code entry allocations are not optimized, because long codes are
//	  unfrequent;
//	- decoding tables are used by hufDecode();
//

bool hufBuildDecTable(const long long *hcode, // i : encoding table
                      int im,                 // i : min index in hcode
                      int iM,                 // i : max index in hcode
                      HufDec *hdecod)         //  o: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        // Grow the secondary symbol list by one (copy-and-replace).
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1 << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

// Emit one packed (code, length) word through the bit accumulator.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

// Emit runCount + 1 instances of the symbol whose code is sCode — either
// explicitly, or as (sCode, runCode, count) when the RLE form is shorter.
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCount.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

int hufEncode            // return: output size (in bits)
    (const long long *hcode,  // i : encoding table
     const unsigned short *in, // i : uncompressed input buffer
     const int ni,        // i : number of 16-bit input values
                          //     (NOTE(review): the upstream comment said
                          //     "bytes", but the loop below indexes in[0..ni))
     int rlc,             // i : rl code
     char *out)           //  o: compressed output buffer
{
  char *outStart = out;
  long long c = 0; // bits not yet written to out
  int lc = 0;      // number of valid bits in c (LSB)
  int s = in[0];
  int cs = 0;

  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  // Flush trailing bits, left-aligned in the final byte.
  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
// #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #define getCode(po, rlc, c, lc, in, out, oe) \ { \ if (po == rlc) { \ if (lc < 8) \ getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) \ return false; \ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) \ *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } // // Decode (uncompress) ni bits based on encoding & decoding tables: // bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; unsigned short *oe = out + no; const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; getCode(pl.lit, rlc, c, lc, in, out, oe); } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; getCode(pl.p[j], rlc, c, lc, in, out, oe); break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; getCode(pl.lit, rlc, c, lc, in, out, oe); } else { return 
false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } void countFrequencies(long long freq[HUF_ENCSIZE], const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; long long freq[HUF_ENCSIZE]; countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq, &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq, im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq, raw, nRaw, iM, dataStart); int dataLength = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + dataLength - compressed; } bool hufUncompress(const char compressed[], int nCompressed, unsigned short raw[], int nRaw) { if (nCompressed == 0) { if (nRaw != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. 
Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw); } // catch (...) //{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } unsigned short forwardLutFromBitmap(const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. 
number of ones in bitmap minus 1 unsigned short reverseLutFromBitmap(const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. number of ones in bitmap minus 1 void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } bool CompressPiz(unsigned char *outPtr, unsigned int &outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int dataWidth, int numLines) { unsigned char bitmap[BITMAP_SIZE]; unsigned short minNonZero; unsigned short maxNonZero; if (IsBigEndian()) { // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; } // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); int i = 0; for (size_t c = 0; c < channelData.size(); c++, i++) { PIZChannelData &cd = channelData[i]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = dataWidth; cd.ny = numLines; // cd.ys = c.channel().ySampling; int pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[i].pixelType == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = pixelSize / sizeof(short); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < numLines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; int n = cd.nx * cd.size; memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), tmpBuffer.size(), bitmap, minNonZero, 
maxNonZero); unsigned short lut[USHORT_RANGE]; unsigned short maxValue = forwardLutFromBitmap(bitmap, lut); applyLut(lut, &tmpBuffer.at(0), tmpBuffer.size()); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, (char *)&bitmap[0] + minNonZero, maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), tmpBuffer.size(), buf); memcpy(lengthPtr, &length, sizeof(int)); outSize = (reinterpret_cast<unsigned char *>(buf) - outPtr) + length; return true; } bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, const std::vector<ChannelInfo> &channelInfo, int dataWidth, int numLines) { unsigned char bitmap[BITMAP_SIZE]; unsigned short minNonZero; unsigned short maxNonZero; if (IsBigEndian()) { // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; } memset(bitmap, 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy((char *)&bitmap[0] + minNonZero, ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } unsigned short lut[USHORT_RANGE]; memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap, lut); // // Huffman decoding // int length; length = *(reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer.at(0), tmpBufSize); // // Wavelet decoding // std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < channelInfo.size(); ++i) { const ChannelInfo &chan = channelInfo[i]; int pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixelType == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = dataWidth; channelData[i].ny = numLines; // channelData[i].ys = 1; channelData[i].size = pixelSize / sizeof(short); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut, &tmpBuffer.at(0), tmpBufSize); for (int y = 0; y < numLines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; int n = cd.nx * cd.size; memcpy(outPtr, cd.end, n * 
sizeof(unsigned short)); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } // // ----------------------------------------------------------------- // } // namespace int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { if (err) { (*err) = "Invalid argument.\n"; } return -1; } EXRImage exrImage; InitEXRImage(&exrImage); { int ret = ParseMultiChannelEXRHeaderFromFile(&exrImage, filename, err); if (ret != 0) { return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exrImage.num_channels; i++) { if (exrImage.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exrImage.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadMultiChannelEXRFromFile(&exrImage, filename, err); if (ret != 0) { return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exrImage.num_channels; c++) { if (strcmp(exrImage.channel_names[c], "R") == 0) { idxR = c; } else if (strcmp(exrImage.channel_names[c], "G") == 0) { idxG = c; } else if (strcmp(exrImage.channel_names[c], "B") == 0) { idxB = c; } else if (strcmp(exrImage.channel_names[c], "A") == 0) { idxA = c; } } if (idxR == -1) { if (err) { (*err) = "R channel not found\n"; } // @todo { free exrImage } return -1; } if (idxG == -1) { if (err) { (*err) = "G channel not found\n"; } // @todo { free exrImage } return -1; } if (idxB == -1) { if (err) { (*err) = "B channel not found\n"; } // @todo { free exrImage } return -1; } (*out_rgba) = (float *)malloc(4 * sizeof(float) * exrImage.width * exrImage.height); for (int i = 0; i < exrImage.width * exrImage.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exrImage.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exrImage.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exrImage.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exrImage.images)[idxA][i]; } 
else { (*out_rgba)[4 * i + 3] = 1.0; } } (*width) = exrImage.width; (*height) = exrImage.height; // @todo { free exrImage } return 0; } int ParseEXRHeaderFromMemory(EXRAttribute *customAttributes, int *numCustomAttributes, int *width, int *height, const unsigned char *memory) { if (memory == NULL) { // Invalid argument return -1; } const char *buf = reinterpret_cast<const char *>(memory); const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { // if (err) { // (*err) = "Header mismatch."; //} return -3; } marker += 4; } // Version, scanline. { // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) { // if (err) { // (*err) = "Unsupported version or scanline."; //} return -4; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int lineOrder = 0; // @fixme int displayWindow[4] = {-1, -1, -1, -1}; // @fixme float screenWindowCenter[2] = {0.0f, 0.0f}; // @fixme float screenWindowWidth = 1.0f; // @fixme int numChannels = -1; float pixelAspectRatio = 1.0f; // @fixme std::vector<ChannelInfo> channels; std::vector<EXRAttribute> attribs; if (numCustomAttributes) { (*numCustomAttributes) = 0; } // Read attributes for (;;) { std::string attrName; std::string attrType; std::vector<unsigned char> data; const char *marker_next = ReadAttribute(attrName, attrType, data, marker); if (marker_next == NULL) { marker++; // skip '\0' break; } if (attrName.compare("compression") == TINYEXR_COMPRESSIONTYPE_NONE) { // mwkm // 0 : NO_COMPRESSION // 1 : RLE // 2 : ZIPS (Single scanline) // 3 : ZIP (16-line block) // 4 : PIZ (32-line block) if (data[0] > TINYEXR_COMPRESSIONTYPE_PIZ) { // if (err) { // (*err) = "Unsupported compression type."; //} return -5; } } else if (attrName.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned 
char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int ReadChannelInfo(channels, data); numChannels = channels.size(); if (numChannels < 1) { // if (err) { // (*err) = "Invalid channels format."; //} return -6; } } else if (attrName.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&dx)); swap4(reinterpret_cast<unsigned int *>(&dy)); swap4(reinterpret_cast<unsigned int *>(&dw)); swap4(reinterpret_cast<unsigned int *>(&dh)); } } else if (attrName.compare("displayWindow") == 0) { memcpy(&displayWindow[0], &data.at(0), sizeof(int)); memcpy(&displayWindow[1], &data.at(4), sizeof(int)); memcpy(&displayWindow[2], &data.at(8), sizeof(int)); memcpy(&displayWindow[3], &data.at(12), sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&displayWindow[0])); swap4(reinterpret_cast<unsigned int *>(&displayWindow[1])); swap4(reinterpret_cast<unsigned int *>(&displayWindow[2])); swap4(reinterpret_cast<unsigned int *>(&displayWindow[3])); } } else if (attrName.compare("lineOrder") == 0) { memcpy(&lineOrder, &data.at(0), sizeof(float)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&lineOrder)); } } else if (attrName.compare("pixelAspectRatio") == 0) { memcpy(&pixelAspectRatio, &data.at(0), sizeof(float)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&pixelAspectRatio)); } } else if (attrName.compare("screenWindowCenter") == 0) { memcpy(&screenWindowCenter[0], &data.at(0), sizeof(float)); memcpy(&screenWindowCenter[1], &data.at(4), sizeof(float)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[0])); swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[1])); } } else if (attrName.compare("screenWindowWidth") == 0) { memcpy(&screenWindowWidth, 
&data.at(0), sizeof(float)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&screenWindowWidth)); } } else { // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES) if (numCustomAttributes && ((*numCustomAttributes) < TINYEXR_MAX_ATTRIBUTES)) { EXRAttribute attrib; attrib.name = strdup(attrName.c_str()); attrib.type = strdup(attrType.c_str()); attrib.size = data.size(); attrib.value = (unsigned char *)malloc(data.size()); memcpy((char *)attrib.value, &data.at(0), data.size()); attribs.push_back(attrib); } } marker = marker_next; } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(numChannels >= 1); int dataWidth = dw - dx + 1; int dataHeight = dh - dy + 1; (*width) = dataWidth; (*height) = dataHeight; if (numCustomAttributes) { assert(attribs.size() < TINYEXR_MAX_ATTRIBUTES); (*numCustomAttributes) = attribs.size(); // Assume the pointer to customAttributes has enough memory to store. for (int i = 0; i < (int)attribs.size(); i++) { customAttributes[i] = attribs[i]; } } return 0; } int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory, const char **err) { if (out_rgba == NULL || memory == NULL) { if (err) { (*err) = "Invalid argument.\n"; } return -1; } EXRImage exrImage; InitEXRImage(&exrImage); int ret = LoadMultiChannelEXRFromMemory(&exrImage, memory, err); if (ret != 0) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exrImage.num_channels; c++) { if (strcmp(exrImage.channel_names[c], "R") == 0) { idxR = c; } else if (strcmp(exrImage.channel_names[c], "G") == 0) { idxG = c; } else if (strcmp(exrImage.channel_names[c], "B") == 0) { idxB = c; } else if (strcmp(exrImage.channel_names[c], "A") == 0) { idxA = c; } } if (idxR == -1) { if (err) { (*err) = "R channel not found\n"; } // @todo { free exrImage } return -1; } if (idxG == -1) { if (err) { (*err) = "G channel not found\n"; } // @todo { free exrImage } return -1; } if (idxB == -1) { if (err) { (*err) = "B 
channel not found\n"; } // @todo { free exrImage } return -1; } // Assume `out_rgba` have enough memory allocated. for (int i = 0; i < exrImage.width * exrImage.height; i++) { out_rgba[4 * i + 0] = reinterpret_cast<float **>(exrImage.images)[idxR][i]; out_rgba[4 * i + 1] = reinterpret_cast<float **>(exrImage.images)[idxG][i]; out_rgba[4 * i + 2] = reinterpret_cast<float **>(exrImage.images)[idxB][i]; if (idxA > 0) { out_rgba[4 * i + 3] = reinterpret_cast<float **>(exrImage.images)[idxA][i]; } else { out_rgba[4 * i + 3] = 1.0; } } return 0; } int LoadMultiChannelEXRFromFile(EXRImage *exrImage, const char *filename, const char **err) { if (exrImage == NULL) { if (err) { (*err) = "Invalid argument."; } return -1; } FILE *fp = fopen(filename, "rb"); if (!fp) { if (err) { (*err) = "Cannot read file."; } return -1; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = ftell(fp); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadMultiChannelEXRFromMemory(exrImage, &buf.at(0), err); } int LoadMultiChannelEXRFromMemory(EXRImage *exrImage, const unsigned char *memory, const char **err) { if (exrImage == NULL || memory == NULL) { if (err) { (*err) = "Invalid argument."; } return -1; } const char *buf = reinterpret_cast<const char *>(memory); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { if (err) { (*err) = "Header mismatch."; } return -3; } marker += 4; } // Version, scanline. { // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) { if (err) { (*err) = "Unsupported version or scanline."; } return -4; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int numScanlineBlocks = 1; // 16 for ZIP compression. 
int compressionType = -1; int numChannels = -1; unsigned char lineOrder = 0; // 0 -> increasing y; 1 -> decreasing std::vector<ChannelInfo> channels; // Read attributes for (;;) { std::string attrName; std::string attrType; std::vector<unsigned char> data; const char *marker_next = ReadAttribute(attrName, attrType, data, marker); if (marker_next == NULL) { marker++; // skip '\0' break; } if (attrName.compare("compression") == 0) { // mwkm // 0 : NO_COMPRESSION // 1 : RLE // 2 : ZIPS (Single scanline) // 3 : ZIP (16-line block) // 4 : PIZ (32-line block) if (data[0] != TINYEXR_COMPRESSIONTYPE_NONE && data[0] != TINYEXR_COMPRESSIONTYPE_ZIPS && data[0] != TINYEXR_COMPRESSIONTYPE_ZIP && data[0] != TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "Unsupported compression type."; } return -5; } compressionType = data[0]; if (compressionType == TINYEXR_COMPRESSIONTYPE_ZIP) { numScanlineBlocks = 16; } else if (compressionType == TINYEXR_COMPRESSIONTYPE_PIZ) { numScanlineBlocks = 32; } } else if (attrName.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int ReadChannelInfo(channels, data); numChannels = channels.size(); if (numChannels < 1) { if (err) { (*err) = "Invalid channels format."; } return -6; } } else if (attrName.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&dx)); swap4(reinterpret_cast<unsigned int *>(&dy)); swap4(reinterpret_cast<unsigned int *>(&dw)); swap4(reinterpret_cast<unsigned int *>(&dh)); } } else if (attrName.compare("displayWindow") == 0) { int x, y, w, h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), 
sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&x)); swap4(reinterpret_cast<unsigned int *>(&y)); swap4(reinterpret_cast<unsigned int *>(&w)); swap4(reinterpret_cast<unsigned int *>(&h)); } } else if (attrName.compare("lineOrder") == 0) { memcpy(&lineOrder, &data.at(0), sizeof(lineOrder)); } marker = marker_next; } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(numChannels >= 1); int dataWidth = dw - dx + 1; int dataHeight = dh - dy + 1; // Read offset tables. int numBlocks = dataHeight / numScanlineBlocks; if (numBlocks * numScanlineBlocks < dataHeight) { numBlocks++; } std::vector<long long> offsets(numBlocks); for (int y = 0; y < numBlocks; y++) { long long offset; memcpy(&offset, marker, sizeof(long long)); if (IsBigEndian()) { swap8(reinterpret_cast<unsigned long long *>(&offset)); } marker += sizeof(long long); // = 8 offsets[y] = offset; } exrImage->images = reinterpret_cast<unsigned char **>( (float **)malloc(sizeof(float *) * numChannels)); std::vector<size_t> channelOffsetList(numChannels); int pixelDataSize = 0; size_t channelOffset = 0; for (int c = 0; c < numChannels; c++) { channelOffsetList[c] = channelOffset; if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) { pixelDataSize += sizeof(unsigned short); channelOffset += sizeof(unsigned short); // Alloc internal image for half type. 
if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { exrImage->images[c] = reinterpret_cast<unsigned char *>((unsigned short *)malloc( sizeof(unsigned short) * dataWidth * dataHeight)); } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { exrImage->images[c] = reinterpret_cast<unsigned char *>( (float *)malloc(sizeof(float) * dataWidth * dataHeight)); } else { assert(0); } } else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) { pixelDataSize += sizeof(float); channelOffset += sizeof(float); exrImage->images[c] = reinterpret_cast<unsigned char *>( (float *)malloc(sizeof(float) * dataWidth * dataHeight)); } else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) { pixelDataSize += sizeof(unsigned int); channelOffset += sizeof(unsigned int); exrImage->images[c] = reinterpret_cast<unsigned char *>(( unsigned int *)malloc(sizeof(unsigned int) * dataWidth * dataHeight)); } else { assert(0); } } #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < numBlocks; y++) { const unsigned char *dataPtr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) int lineNo; memcpy(&lineNo, dataPtr, sizeof(int)); int dataLen; memcpy(&dataLen, dataPtr + 4, sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&lineNo)); swap4(reinterpret_cast<unsigned int *>(&dataLen)); } int endLineNo = (std::min)(lineNo + numScanlineBlocks, dataHeight); int numLines = endLineNo - lineNo; if (compressionType == 4) { // PIZ // Allocate original data size. 
std::vector<unsigned char> outBuf(dataWidth * numLines * pixelDataSize); size_t tmpBufLen = dataWidth * numLines * pixelDataSize; DecompressPiz(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dataPtr + 8, tmpBufLen, channels, dataWidth, numLines); bool isBigEndian = IsBigEndian(); // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (int c = 0; c < numChannels; c++) { if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) { for (int v = 0; v < numLines; v++) { const unsigned short *linePtr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixelDataSize * dataWidth + channelOffsetList[c] * dataWidth)); for (int u = 0; u < dataWidth; u++) { FP16 hf; hf.u = linePtr[u]; if (isBigEndian) { swap2(reinterpret_cast<unsigned short *>(&hf.u)); } if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(exrImage->images)[c]; if (lineOrder == 0) { image += (lineNo + v) * dataWidth + u; } else { image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(exrImage->images)[c]; if (lineOrder == 0) { image += (lineNo + v) * dataWidth + u; } else { image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u; } *image = f32.f; } } } } else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) { assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (int v = 0; v < numLines; v++) { const unsigned int *linePtr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixelDataSize * dataWidth + channelOffsetList[c] 
* dataWidth)); for (int u = 0; u < dataWidth; u++) { unsigned int val = linePtr[u]; if (isBigEndian) { swap4(&val); } unsigned int *image = reinterpret_cast<unsigned int **>(exrImage->images)[c]; if (lineOrder == 0) { image += (lineNo + v) * dataWidth + u; } else { image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u; } *image = val; } } } else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) { assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (int v = 0; v < numLines; v++) { const float *linePtr = reinterpret_cast<float *>( &outBuf.at(v * pixelDataSize * dataWidth + channelOffsetList[c] * dataWidth)); for (int u = 0; u < dataWidth; u++) { float val = linePtr[u]; if (isBigEndian) { swap4(reinterpret_cast<unsigned int *>(&val)); } float *image = reinterpret_cast<float **>(exrImage->images)[c]; if (lineOrder == 0) { image += (lineNo + v) * dataWidth + u; } else { image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u; } *image = val; } } } else { assert(0); } } // mwkm, ZIPS or ZIP both good to go } else if (compressionType == 2 || compressionType == 3) { // ZIP // Allocate original data size. std::vector<unsigned char> outBuf(dataWidth * numLines * pixelDataSize); unsigned long dstLen = outBuf.size(); DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, dataPtr + 8, dataLen); bool isBigEndian = IsBigEndian(); // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (int c = 0; c < numChannels; c++) { if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) { for (int v = 0; v < numLines; v++) { const unsigned short *linePtr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixelDataSize * dataWidth + channelOffsetList[c] * dataWidth)); for (int u = 0; u < dataWidth; u++) { FP16 hf; hf.u = linePtr[u]; if (isBigEndian) { swap2(reinterpret_cast<unsigned short *>(&hf.u)); } if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(exrImage->images)[c]; if (lineOrder == 0) { image += (lineNo + v) * dataWidth + u; } else { image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(exrImage->images)[c]; if (lineOrder == 0) { image += (lineNo + v) * dataWidth + u; } else { image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u; } *image = f32.f; } } } } else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) { assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (int v = 0; v < numLines; v++) { const unsigned int *linePtr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixelDataSize * dataWidth + channelOffsetList[c] * dataWidth)); for (int u = 0; u < dataWidth; u++) { unsigned int val = linePtr[u]; if (isBigEndian) { swap4(&val); } unsigned int *image = reinterpret_cast<unsigned int **>(exrImage->images)[c]; if (lineOrder == 0) { image += (lineNo + v) * dataWidth + u; } else { image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u; } *image = val; } } } else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) { assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (int v = 0; v < numLines; v++) { const float *linePtr = reinterpret_cast<float *>( &outBuf.at(v * pixelDataSize * dataWidth + channelOffsetList[c] * dataWidth)); for (int u = 0; u < dataWidth; u++) { float val = linePtr[u]; if 
(isBigEndian) { swap4(reinterpret_cast<unsigned int *>(&val)); } float *image = reinterpret_cast<float **>(exrImage->images)[c]; if (lineOrder == 0) { image += (lineNo + v) * dataWidth + u; } else { image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u; } *image = val; } } } else { assert(0); } } } else if (compressionType == 0) { // No compression bool isBigEndian = IsBigEndian(); for (int c = 0; c < numChannels; c++) { if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) { const unsigned short *linePtr = reinterpret_cast<const unsigned short *>( dataPtr + 8 + c * dataWidth * sizeof(unsigned short)); if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(exrImage->images[c]); if (lineOrder == 0) { outLine += y * dataWidth; } else { outLine += (dataHeight - 1 - y) * dataWidth; } for (int u = 0; u < dataWidth; u++) { FP16 hf; hf.u = linePtr[u]; if (isBigEndian) { swap2(reinterpret_cast<unsigned short *>(&hf.u)); } outLine[u] = hf.u; } } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(exrImage->images[c]); if (lineOrder == 0) { outLine += y * dataWidth; } else { outLine += (dataHeight - 1 - y) * dataWidth; } for (int u = 0; u < dataWidth; u++) { FP16 hf; hf.u = linePtr[u]; if (isBigEndian) { swap2(reinterpret_cast<unsigned short *>(&hf.u)); } FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); } } else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) { const float *linePtr = reinterpret_cast<const float *>( dataPtr + 8 + c * dataWidth * sizeof(float)); float *outLine = reinterpret_cast<float *>(exrImage->images[c]); if (lineOrder == 0) { outLine += y * dataWidth; } else { outLine += (dataHeight - 1 - y) * dataWidth; } for (int u = 0; u < dataWidth; u++) { float val = linePtr[u]; if (isBigEndian) { swap4(reinterpret_cast<unsigned int *>(&val)); } outLine[u] = val; } } else if (channels[c].pixelType 
== TINYEXR_PIXELTYPE_UINT) { const unsigned int *linePtr = reinterpret_cast<const unsigned int *>( dataPtr + 8 + c * dataWidth * sizeof(unsigned int)); unsigned int *outLine = reinterpret_cast<unsigned int *>(exrImage->images[c]); if (lineOrder == 0) { outLine += y * dataWidth; } else { outLine += (dataHeight - 1 - y) * dataWidth; } for (int u = 0; u < dataWidth; u++) { unsigned int val = linePtr[u]; if (isBigEndian) { swap4(reinterpret_cast<unsigned int *>(&val)); } outLine[u] = val; } } } } } // omp parallel { exrImage->channel_names = (const char **)malloc(sizeof(const char *) * numChannels); for (int c = 0; c < numChannels; c++) { #ifdef _WIN32 exrImage->channel_names[c] = _strdup(channels[c].name.c_str()); #else exrImage->channel_names[c] = strdup(channels[c].name.c_str()); #endif } exrImage->num_channels = numChannels; exrImage->width = dataWidth; exrImage->height = dataHeight; // Fill with requested_pixel_types. exrImage->pixel_types = (int *)malloc(sizeof(int *) * numChannels); for (int c = 0; c < numChannels; c++) { exrImage->pixel_types[c] = exrImage->requested_pixel_types[c]; } } return 0; // OK } // @deprecated #if 0 int SaveEXR(const float *in_rgba, int width, int height, const char *filename, const char **err) { if (in_rgba == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return -1; } FILE *fp = fopen(filename, "wb"); if (!fp) { if (err) { (*err) = "Cannot write a file."; } return -1; } // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; size_t n = fwrite(header, 1, 4, fp); assert(n == 4); } // Version, scanline. { const char marker[] = {2, 0, 0, 0}; size_t n = fwrite(marker, 1, 4, fp); assert(n == 4); } int numScanlineBlocks = 16; // 16 for ZIP compression. // Write attributes. 
{ unsigned char data[] = { 'A', 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'B', 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'G', 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'R', 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0}; // last 0 = // terminator. WriteAttribute(fp, "channels", "chlist", data, 18 * 4 + 1); // +1 = null } { int compressionType = 3; // ZIP compression WriteAttribute(fp, "compression", "compression", reinterpret_cast<const unsigned char *>(&compressionType), 1); } { int data[4] = {0, 0, width - 1, height - 1}; WriteAttribute(fp, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); WriteAttribute(fp, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char lineOrder = 0; // increasingY WriteAttribute(fp, "lineOrder", "lineOrder", &lineOrder, 1); } { float aspectRatio = 1.0f; WriteAttribute(fp, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; WriteAttribute(fp, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = (float)width; WriteAttribute(fp, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } { // end of header unsigned char e = 0; fwrite(&e, 1, 1, fp); } int numBlocks = height / numScanlineBlocks; if (numBlocks * numScanlineBlocks < height) { numBlocks++; } std::vector<long long> offsets(numBlocks); size_t headerSize = ftell(fp); // sizeof(header) long long offset = headerSize + numBlocks * sizeof(long long); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; for (int i = 0; i < numBlocks; i++) { int startY = numScanlineBlocks * i; int endY = (std::min)(numScanlineBlocks * (i + 1), height); int h = endY - startY; std::vector<unsigned short> buf(4 * width * h); for (int y = 0; y < h; y++) { for (int x = 0; x < width; x++) { 
FP32 r, g, b, a; r.f = in_rgba[4 * ((y + startY) * width + x) + 0]; g.f = in_rgba[4 * ((y + startY) * width + x) + 1]; b.f = in_rgba[4 * ((y + startY) * width + x) + 2]; a.f = in_rgba[4 * ((y + startY) * width + x) + 3]; FP16 hr, hg, hb, ha; hr = float_to_half_full(r); hg = float_to_half_full(g); hb = float_to_half_full(b); ha = float_to_half_full(a); // Assume increasing Y buf[4 * y * width + 3 * width + x] = hr.u; buf[4 * y * width + 2 * width + x] = hg.u; buf[4 * y * width + 1 * width + x] = hb.u; buf[4 * y * width + 0 * width + x] = ha.u; } } int bound = miniz::mz_compressBound(buf.size() * sizeof(unsigned short)); std::vector<unsigned char> block( miniz::mz_compressBound(buf.size() * sizeof(unsigned short))); unsigned long long outSize = block.size(); CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size() * sizeof(unsigned short)); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int dataLen = outSize; // truncate memcpy(&header.at(0), &startY, sizeof(int)); memcpy(&header.at(4), &dataLen, sizeof(unsigned int)); data.insert(data.end(), header.begin(), header.end()); data.insert(data.end(), block.begin(), block.begin() + dataLen); offsets[i] = offset; offset += dataLen + 8; // 8 = sizeof(blockHeader) } fwrite(&offsets.at(0), 1, sizeof(unsigned long long) * numBlocks, fp); fwrite(&data.at(0), 1, data.size(), fp); fclose(fp); return 0; // OK } #endif size_t SaveMultiChannelEXRToMemory(const EXRImage *exrImage, unsigned char **memory_out, const char **err) { if (exrImage == NULL || memory_out == NULL || exrImage->compression < 0 || exrImage->compression > TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "Invalid argument."; } return 0; } std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. 
{ const char marker[] = {2, 0, 0, 0}; memory.insert(memory.end(), marker, marker + 4); } int numScanlines = 1; if (exrImage->compression == TINYEXR_COMPRESSIONTYPE_ZIP) { numScanlines = 16; } else if (exrImage->compression == TINYEXR_COMPRESSIONTYPE_PIZ) { numScanlines = 32; } // Write attributes. std::vector<ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exrImage->num_channels; c++) { ChannelInfo info; info.pLinear = 0; info.pixelType = exrImage->requested_pixel_types[c]; info.xSampling = 1; info.ySampling = 1; info.name = std::string(exrImage->channel_names[c]); channels.push_back(info); } WriteChannelInfo(data, channels); WriteAttributeToMemory(memory, "channels", "chlist", &data.at(0), data.size()); // +1 = null } { int comp = exrImage->compression; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&comp)); } WriteAttributeToMemory(memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exrImage->width - 1, exrImage->height - 1}; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&data[0])); swap4(reinterpret_cast<unsigned int *>(&data[1])); swap4(reinterpret_cast<unsigned int *>(&data[2])); swap4(reinterpret_cast<unsigned int *>(&data[3])); } WriteAttributeToMemory(memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); WriteAttributeToMemory(memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char lineOrder = 0; // increasingY WriteAttributeToMemory(memory, "lineOrder", "lineOrder", &lineOrder, 1); } { float aspectRatio = 1.0f; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); } WriteAttributeToMemory( memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&center[0])); 
swap4(reinterpret_cast<unsigned int *>(&center[1])); } WriteAttributeToMemory(memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = (float)exrImage->width; if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&w)); } WriteAttributeToMemory(memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exrImage->num_custom_attributes > 0) { // @todo { endian } for (int i = 0; i < exrImage->num_custom_attributes; i++) { WriteAttributeToMemory(memory, exrImage->custom_attributes[i].name, exrImage->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( &exrImage->custom_attributes[i].value), exrImage->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int numBlocks = exrImage->height / numScanlines; if (numBlocks * numScanlines < exrImage->height) { numBlocks++; } std::vector<long long> offsets(numBlocks); size_t headerSize = memory.size(); long long offset = headerSize + numBlocks * sizeof(long long); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; bool isBigEndian = IsBigEndian(); std::vector<std::vector<unsigned char> > dataList(numBlocks); std::vector<size_t> channelOffsetList(exrImage->num_channels); int pixelDataSize = 0; size_t channelOffset = 0; for (int c = 0; c < exrImage->num_channels; c++) { channelOffsetList[c] = channelOffset; if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixelDataSize += sizeof(unsigned short); channelOffset += sizeof(unsigned short); } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixelDataSize += sizeof(float); channelOffset += sizeof(float); } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixelDataSize += sizeof(unsigned int); channelOffset += sizeof(unsigned int); } else { assert(0); } } #ifdef _OPENMP #pragma omp parallel for #endif for (int i 
= 0; i < numBlocks; i++) { int startY = numScanlines * i; int endY = (std::min)(numScanlines * (i + 1), exrImage->height); int h = endY - startY; std::vector<unsigned char> buf(exrImage->width * h * pixelDataSize); for (int c = 0; c < exrImage->num_channels; c++) { if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exrImage->width; x++) { FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exrImage->images)[c][(y + startY) * exrImage->width + x]; FP32 f32 = half_to_float(h16); if (isBigEndian) { swap4(reinterpret_cast<unsigned int *>(&f32.f)); } // Assume increasing Y float *linePtr = reinterpret_cast<float *>( &buf.at(pixelDataSize * y * exrImage->width + channelOffsetList[c] * exrImage->width)); linePtr[x] = f32.f; } } } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exrImage->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exrImage->images)[c][(y + startY) * exrImage->width + x]; if (isBigEndian) { swap2(&val); } // Assume increasing Y unsigned short *linePtr = reinterpret_cast<unsigned short *>( &buf.at(pixelDataSize * y * exrImage->width + channelOffsetList[c] * exrImage->width)); linePtr[x] = val; } } } else { assert(0); } } else if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exrImage->width; x++) { FP32 f32; f32.f = reinterpret_cast<float **>( exrImage->images)[c][(y + startY) * exrImage->width + x]; FP16 h16; h16 = float_to_half_full(f32); if (isBigEndian) { swap2(reinterpret_cast<unsigned short *>(&h16.u)); } // Assume increasing Y unsigned short *linePtr = reinterpret_cast<unsigned short *>( &buf.at(pixelDataSize * y * exrImage->width + channelOffsetList[c] * exrImage->width)); linePtr[x] = h16.u; } } } 
else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exrImage->width; x++) { float val = reinterpret_cast<float **>( exrImage->images)[c][(y + startY) * exrImage->width + x]; if (isBigEndian) { swap4(reinterpret_cast<unsigned int *>(&val)); } // Assume increasing Y float *linePtr = reinterpret_cast<float *>( &buf.at(pixelDataSize * y * exrImage->width + channelOffsetList[c] * exrImage->width)); linePtr[x] = val; } } } else { assert(0); } } else if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exrImage->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exrImage->images)[c][(y + startY) * exrImage->width + x]; if (isBigEndian) { swap4(&val); } // Assume increasing Y unsigned int *linePtr = reinterpret_cast<unsigned int *>( &buf.at(pixelDataSize * y * exrImage->width + channelOffsetList[c] * exrImage->width)); linePtr[x] = val; } } } } if (exrImage->compression == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int dataLen = (unsigned int)buf.size(); memcpy(&header.at(0), &startY, sizeof(int)); memcpy(&header.at(4), &dataLen, sizeof(unsigned int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&header.at(0))); swap4(reinterpret_cast<unsigned int *>(&header.at(4))); } dataList[i].insert(dataList[i].end(), header.begin(), header.end()); dataList[i].insert(dataList[i].end(), buf.begin(), buf.begin() + dataLen); } else if ((exrImage->compression == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exrImage->compression == TINYEXR_COMPRESSIONTYPE_ZIP)) { std::vector<unsigned char> block(miniz::mz_compressBound(buf.size())); unsigned long long outSize = block.size(); CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size()); // 4 byte: scan line // 4 byte: data size // ~ : pixel 
data(compressed) std::vector<unsigned char> header(8); unsigned int dataLen = outSize; // truncate memcpy(&header.at(0), &startY, sizeof(int)); memcpy(&header.at(4), &dataLen, sizeof(unsigned int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&header.at(0))); swap4(reinterpret_cast<unsigned int *>(&header.at(4))); } dataList[i].insert(dataList[i].end(), header.begin(), header.end()); dataList[i].insert(dataList[i].end(), block.begin(), block.begin() + dataLen); } else if (exrImage->compression == TINYEXR_COMPRESSIONTYPE_PIZ) { unsigned int bufLen = 1024 + 1.2 * (unsigned int)buf.size(); // @fixme { compute good bound. } std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exrImage->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int dataLen = outSize; memcpy(&header.at(0), &startY, sizeof(int)); memcpy(&header.at(4), &dataLen, sizeof(unsigned int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&header.at(0))); swap4(reinterpret_cast<unsigned int *>(&header.at(4))); } dataList[i].insert(dataList[i].end(), header.begin(), header.end()); dataList[i].insert(dataList[i].end(), block.begin(), block.begin() + dataLen); } else { assert(0); } } // omp parallel for (int i = 0; i < numBlocks; i++) { data.insert(data.end(), dataList[i].begin(), dataList[i].end()); offsets[i] = offset; if (IsBigEndian()) { swap8(reinterpret_cast<unsigned long long *>(&offsets[i])); } offset += dataList[i].size(); } { memory.insert(memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(unsigned long long) * numBlocks); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = (unsigned char 
*)malloc(memory.size()); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveMultiChannelEXRToFile(const EXRImage *exrImage, const char *filename, const char **err) { if (exrImage == NULL || filename == NULL || exrImage->compression < 0 || exrImage->compression > TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "Invalid argument."; } return -1; } FILE *fp = fopen(filename, "wb"); if (!fp) { if (err) { (*err) = "Cannot write a file."; } return -1; } unsigned char *mem = NULL; size_t mem_size = SaveMultiChannelEXRToMemory(exrImage, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return 0; // OK } int LoadDeepEXR(DeepImage *deepImage, const char *filename, const char **err) { if (deepImage == NULL) { if (err) { (*err) = "Invalid argument."; } return -1; } FILE *fp = fopen(filename, "rb"); if (!fp) { if (err) { (*err) = "Cannot read file."; } return -1; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = ftell(fp); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); if (err) { (*err) = "File size is zero."; } return -1; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { if (err) { (*err) = "Header mismatch."; } return -3; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { if (err) { (*err) = "Unsupported version or scanline."; } return -4; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int numScanlineBlocks = 1; // 16 for ZIP compression. 
int compressionType = -1; int numChannels = -1; std::vector<ChannelInfo> channels; // Read attributes for (;;) { std::string attrName; std::string attrType; std::vector<unsigned char> data; const char *marker_next = ReadAttribute(attrName, attrType, data, marker); if (marker_next == NULL) { marker++; // skip '\0' break; } if (attrName.compare("compression") == 0) { // must be 0:No compression, 1: RLE, 2: ZIPs or 3: ZIP if (data[0] > 3) { if (err) { (*err) = "Unsupported compression type."; } return -5; } compressionType = data[0]; if (compressionType == 3) { // ZIP numScanlineBlocks = 16; } } else if (attrName.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int ReadChannelInfo(channels, data); numChannels = channels.size(); if (numChannels < 1) { if (err) { (*err) = "Invalid channels format."; } return -6; } } else if (attrName.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&dx)); swap4(reinterpret_cast<unsigned int *>(&dy)); swap4(reinterpret_cast<unsigned int *>(&dw)); swap4(reinterpret_cast<unsigned int *>(&dh)); } } else if (attrName.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&x)); swap4(reinterpret_cast<unsigned int *>(&y)); swap4(reinterpret_cast<unsigned int *>(&w)); swap4(reinterpret_cast<unsigned int *>(&h)); } } marker = marker_next; } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); 
assert(numChannels >= 1); int dataWidth = dw - dx + 1; int dataHeight = dh - dy + 1; std::vector<float> image(dataWidth * dataHeight * 4); // 4 = RGBA // Read offset tables. int numBlocks = dataHeight / numScanlineBlocks; if (numBlocks * numScanlineBlocks < dataHeight) { numBlocks++; } std::vector<long long> offsets(numBlocks); for (int y = 0; y < numBlocks; y++) { long long offset; memcpy(&offset, marker, sizeof(long long)); if (IsBigEndian()) { swap8(reinterpret_cast<unsigned long long *>(&offset)); } marker += sizeof(long long); // = 8 offsets[y] = offset; } if (compressionType != 0 && compressionType != 2 && compressionType != 3) { if (err) { (*err) = "Unsupported format."; } return -10; } deepImage->image = (float ***)malloc(sizeof(float **) * numChannels); for (int c = 0; c < numChannels; c++) { deepImage->image[c] = (float **)malloc(sizeof(float *) * dataHeight); for (int y = 0; y < dataHeight; y++) { } } deepImage->offset_table = (int **)malloc(sizeof(int *) * dataHeight); for (int y = 0; y < dataHeight; y++) { deepImage->offset_table[y] = (int *)malloc(sizeof(int) * dataWidth); } for (int y = 0; y < numBlocks; y++) { const unsigned char *dataPtr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int lineNo; long long packedOffsetTableSize; long long packedSampleDataSize; long long unpackedSampleDataSize; memcpy(&lineNo, dataPtr, sizeof(int)); memcpy(&packedOffsetTableSize, dataPtr + 4, sizeof(long long)); memcpy(&packedSampleDataSize, dataPtr + 12, sizeof(long long)); memcpy(&unpackedSampleDataSize, dataPtr + 20, sizeof(long long)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&lineNo)); swap8(reinterpret_cast<unsigned long long *>(&packedOffsetTableSize)); swap8(reinterpret_cast<unsigned long long *>(&packedSampleDataSize)); 
swap8(reinterpret_cast<unsigned long long *>(&unpackedSampleDataSize)); } std::vector<int> pixelOffsetTable(dataWidth); // decode pixel offset table. { unsigned long dstLen = pixelOffsetTable.size() * sizeof(int); DecompressZip(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), dstLen, dataPtr + 28, packedOffsetTableSize); assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (int i = 0; i < dataWidth; i++) { deepImage->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sampleData(unpackedSampleDataSize); // decode sample data. { unsigned long dstLen = unpackedSampleDataSize; DecompressZip(reinterpret_cast<unsigned char *>(&sampleData.at(0)), dstLen, dataPtr + 28 + packedOffsetTableSize, packedSampleDataSize); assert(dstLen == (unsigned long)unpackedSampleDataSize); } // decode sample int sampleSize = -1; std::vector<int> channelOffsetList(numChannels); { int channelOffset = 0; for (int i = 0; i < numChannels; i++) { channelOffsetList[i] = channelOffset; if (channels[i].pixelType == TINYEXR_PIXELTYPE_UINT) { // UINT channelOffset += 4; } else if (channels[i].pixelType == TINYEXR_PIXELTYPE_HALF) { // half channelOffset += 2; } else if (channels[i].pixelType == TINYEXR_PIXELTYPE_FLOAT) { // float channelOffset += 4; } else { assert(0); } } sampleSize = channelOffset; } assert(sampleSize >= 2); assert((size_t)(pixelOffsetTable[dataWidth - 1] * sampleSize) == sampleData.size()); int samplesPerLine = sampleData.size() / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { unsigned long long dataOffset = 0; for (int c = 0; c < numChannels; c++) { deepImage->image[c][y] = (float *)malloc(sizeof(float) * samplesPerLine); if (channels[c].pixelType == 0) { // UINT for (int x = 0; x < samplesPerLine; x++) { unsigned int ui = *reinterpret_cast<unsigned int *>( &sampleData.at(dataOffset + x * sizeof(int))); deepImage->image[c][y][x] = (float)ui; // @fixme } dataOffset += sizeof(unsigned int) * 
samplesPerLine; } else if (channels[c].pixelType == 1) { // half for (int x = 0; x < samplesPerLine; x++) { FP16 f16; f16.u = *reinterpret_cast<unsigned short *>( &sampleData.at(dataOffset + x * sizeof(short))); FP32 f32 = half_to_float(f16); deepImage->image[c][y][x] = f32.f; } dataOffset += sizeof(short) * samplesPerLine; } else { // float for (int x = 0; x < samplesPerLine; x++) { float f = *reinterpret_cast<float *>( &sampleData.at(dataOffset + x * sizeof(float))); deepImage->image[c][y][x] = f; } dataOffset += sizeof(float) * samplesPerLine; } } } } // y deepImage->width = dataWidth; deepImage->height = dataHeight; deepImage->channel_names = (const char **)malloc(sizeof(const char *) * numChannels); for (int c = 0; c < numChannels; c++) { #ifdef _WIN32 deepImage->channel_names[c] = _strdup(channels[c].name.c_str()); #else deepImage->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deepImage->num_channels = numChannels; return 0; // OK } int SaveDeepEXR(const DeepImage *deepImage, const char *filename, const char **err) { if (deepImage == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return -1; } FILE *fp = fopen(filename, "rb"); if (!fp) { if (err) { (*err) = "Cannot write file."; } return -1; } // Write header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; size_t n = fwrite(header, 1, 4, fp); if (n != 4) { if (err) { (*err) = "Header write failed."; } fclose(fp); return -3; } } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) const char data[] = {2, 8, 0, 0}; size_t n = fwrite(data, 1, 4, fp); if (n != 4) { if (err) { (*err) = "Flag write failed."; } fclose(fp); return -3; } } // Write attributes. 
{ int data = 2; // ZIPS WriteAttribute(fp, "compression", "compression", reinterpret_cast<const unsigned char *>(&data), sizeof(int)); } { int data[4] = {0, 0, deepImage->width - 1, deepImage->height - 1}; WriteAttribute(fp, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); WriteAttribute(fp, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } int numScanlineBlocks = 1; // Write offset tables. int numBlocks = deepImage->height / numScanlineBlocks; if (numBlocks * numScanlineBlocks < deepImage->height) { numBlocks++; } #if 0 // @todo std::vector<long long> offsets(numBlocks); //std::vector<int> pixelOffsetTable(dataWidth); // compress pixel offset table. { unsigned long dstLen = pixelOffsetTable.size() * sizeof(int); Compresses(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), dstLen, dataPtr + 28, packedOffsetTableSize); assert(dstLen == pixelOffsetTable.size() * sizeof(int)); // int ret = // miniz::mz_uncompress(reinterpret_cast<unsigned char // *>(&pixelOffsetTable.at(0)), &dstLen, dataPtr + 28, // packedOffsetTableSize); // printf("ret = %d, dstLen = %d\n", ret, (int)dstLen); // for (int i = 0; i < dataWidth; i++) { // printf("offt[%d] = %d\n", i, pixelOffsetTable[i]); deepImage->offset_table[y][i] = pixelOffsetTable[i]; } } for (int y = 0; y < numBlocks; y++) { //long long offset = *(reinterpret_cast<const long long *>(marker)); // printf("offset[%d] = %lld\n", y, offset); //marker += sizeof(long long); // = 8 offsets[y] = offset; } // Write offset table. 
fwrite(&offsets.at(0), sizeof(long long), numBlocks, fp); for (int y = 0; y < numBlocks; y++) { const unsigned char *dataPtr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int lineNo = *reinterpret_cast<const int *>(dataPtr); long long packedOffsetTableSize = *reinterpret_cast<const long long *>(dataPtr + 4); long long packedSampleDataSize = *reinterpret_cast<const long long *>(dataPtr + 12); long long unpackedSampleDataSize = *reinterpret_cast<const long long *>(dataPtr + 20); // printf("line: %d, %lld/%lld/%lld\n", lineNo, packedOffsetTableSize, // packedSampleDataSize, unpackedSampleDataSize); int endLineNo = (std::min)(lineNo + numScanlineBlocks, dataHeight); int numLines = endLineNo - lineNo; // printf("numLines: %d\n", numLines); std::vector<int> pixelOffsetTable(dataWidth); // decode pixel offset table. { unsigned long dstLen = pixelOffsetTable.size() * sizeof(int); DecompressZip(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), dstLen, dataPtr + 28, packedOffsetTableSize); assert(dstLen == pixelOffsetTable.size() * sizeof(int)); // int ret = // miniz::mz_uncompress(reinterpret_cast<unsigned char // *>(&pixelOffsetTable.at(0)), &dstLen, dataPtr + 28, // packedOffsetTableSize); // printf("ret = %d, dstLen = %d\n", ret, (int)dstLen); // for (int i = 0; i < dataWidth; i++) { // printf("offt[%d] = %d\n", i, pixelOffsetTable[i]); deepImage->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sampleData(unpackedSampleDataSize); // decode sample data. 
{ unsigned long dstLen = unpackedSampleDataSize; // printf("dstLen = %d\n", dstLen); // printf("srcLen = %d\n", packedSampleDataSize); DecompressZip(reinterpret_cast<unsigned char *>(&sampleData.at(0)), dstLen, dataPtr + 28 + packedOffsetTableSize, packedSampleDataSize); assert(dstLen == unpackedSampleDataSize); } // decode sample int sampleSize = -1; std::vector<int> channelOffsetList(numChannels); { int channelOffset = 0; for (int i = 0; i < numChannels; i++) { // printf("offt[%d] = %d\n", i, channelOffset); channelOffsetList[i] = channelOffset; if (channels[i].pixelType == 0) { // UINT channelOffset += 4; } else if (channels[i].pixelType == 1) { // half channelOffset += 2; } else if (channels[i].pixelType == 2) { // float channelOffset += 4; } else { assert(0); } } sampleSize = channelOffset; } assert(sampleSize >= 2); assert(pixelOffsetTable[dataWidth - 1] * sampleSize == sampleData.size()); int samplesPerLine = sampleData.size() / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { unsigned long long dataOffset = 0; for (int c = 0; c < numChannels; c++) { deepImage->image[c][y] = (float *)malloc(sizeof(float) * samplesPerLine); // unsigned int channelOffset = channelOffsetList[c]; // unsigned int i = channelOffset; // printf("channel = %d. name = %s. 
ty = %d\n", c, // channels[c].name.c_str(), channels[c].pixelType); // printf("dataOffset = %d\n", (int)dataOffset); if (channels[c].pixelType == 0) { // UINT for (int x = 0; x < samplesPerLine; x++) { unsigned int ui = *reinterpret_cast<unsigned int *>( &sampleData.at(dataOffset + x * sizeof(int))); deepImage->image[c][y][x] = (float)ui; // @fixme } dataOffset += sizeof(unsigned int) * samplesPerLine; } else if (channels[c].pixelType == 1) { // half for (int x = 0; x < samplesPerLine; x++) { FP16 f16; f16.u = *reinterpret_cast<unsigned short *>( &sampleData.at(dataOffset + x * sizeof(short))); FP32 f32 = half_to_float(f16); deepImage->image[c][y][x] = f32.f; // printf("c[%d] f(half) = %f (0x%08x)\n", c, f32.f, f16.u); } dataOffset += sizeof(short) * samplesPerLine; } else { // float for (int x = 0; x < samplesPerLine; x++) { float f = *reinterpret_cast<float *>( &sampleData.at(dataOffset + x * sizeof(float))); // printf(" f = %f(0x%08x)\n", f, *((unsigned int *)&f)); deepImage->image[c][y][x] = f; } dataOffset += sizeof(float) * samplesPerLine; } } // printf("total: %d\n", dataOffset); } } // y #endif fclose(fp); return 0; // OK } void InitEXRImage(EXRImage *exrImage) { if (exrImage == NULL) { return; } exrImage->num_custom_attributes = 0; exrImage->num_channels = 0; exrImage->channel_names = NULL; exrImage->images = NULL; exrImage->pixel_types = NULL; exrImage->requested_pixel_types = NULL; exrImage->compression = TINYEXR_COMPRESSIONTYPE_ZIP; } int FreeEXRImage(EXRImage *exrImage) { if (exrImage == NULL) { return -1; // Err } for (int i = 0; i < exrImage->num_channels; i++) { if (exrImage->channel_names && exrImage->channel_names[i]) { free((char *)exrImage->channel_names[i]); // remove const } if (exrImage->images && exrImage->images[i]) { free(exrImage->images[i]); } } if (exrImage->channel_names) { free(exrImage->channel_names); } if (exrImage->images) { free(exrImage->images); } if (exrImage->pixel_types) { free(exrImage->pixel_types); } if 
(exrImage->requested_pixel_types) { free(exrImage->requested_pixel_types); } for (int i = 0; i < exrImage->num_custom_attributes; i++) { if (exrImage->custom_attributes[i].name) { free(exrImage->custom_attributes[i].name); } if (exrImage->custom_attributes[i].type) { free(exrImage->custom_attributes[i].type); } if (exrImage->custom_attributes[i].value) { free(exrImage->custom_attributes[i].value); } } return 0; } int ParseMultiChannelEXRHeaderFromFile(EXRImage *exrImage, const char *filename, const char **err) { if (exrImage == NULL) { if (err) { (*err) = "Invalid argument."; } return -1; } FILE *fp = fopen(filename, "rb"); if (!fp) { if (err) { (*err) = "Cannot read file."; } return -1; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = ftell(fp); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return ParseMultiChannelEXRHeaderFromMemory(exrImage, &buf.at(0), err); } int ParseMultiChannelEXRHeaderFromMemory(EXRImage *exrImage, const unsigned char *memory, const char **err) { if (exrImage == NULL || memory == NULL) { if (err) { (*err) = "Invalid argument."; } return -1; } const char *buf = reinterpret_cast<const char *>(memory); const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { if (err) { (*err) = "Header mismatch."; } return -3; } marker += 4; } // Version, scanline. { // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) { if (err) { (*err) = "Unsupported version or scanline."; } return -4; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int numChannels = -1; int displayWindow[4] = {-1, -1, -1, -1}; // @fixme. 
float screenWindowCenter[2] = {0.0f, 0.0f}; // @fixme float screenWindowWidth = 1.0f; // @fixme float pixelAspectRatio = 1.0f; unsigned char lineOrder = 0; // 0 -> increasing y; 1 -> decreasing std::vector<ChannelInfo> channels; int compressionType = 0; // @fixme int numCustomAttributes = 0; std::vector<EXRAttribute> customAttribs; // Read attributes for (;;) { std::string attrName; std::string attrType; std::vector<unsigned char> data; const char *marker_next = ReadAttribute(attrName, attrType, data, marker); if (marker_next == NULL) { marker++; // skip '\0' break; } if (attrName.compare("compression") == 0) { // must be 0:No compression, 1: RLE, 2: ZIPs, 3: ZIP or 4: PIZ if (data[0] > TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "Unsupported compression type."; } return -5; } compressionType = data[0]; } else if (attrName.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int ReadChannelInfo(channels, data); numChannels = channels.size(); if (numChannels < 1) { if (err) { (*err) = "Invalid channels format."; } return -6; } } else if (attrName.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&dx)); swap4(reinterpret_cast<unsigned int *>(&dy)); swap4(reinterpret_cast<unsigned int *>(&dw)); swap4(reinterpret_cast<unsigned int *>(&dh)); } } else if (attrName.compare("displayWindow") == 0) { memcpy(&displayWindow[0], &data.at(0), sizeof(int)); memcpy(&displayWindow[1], &data.at(4), sizeof(int)); memcpy(&displayWindow[2], &data.at(8), sizeof(int)); memcpy(&displayWindow[3], &data.at(12), sizeof(int)); if (IsBigEndian()) { 
swap4(reinterpret_cast<unsigned int *>(&displayWindow[0])); swap4(reinterpret_cast<unsigned int *>(&displayWindow[1])); swap4(reinterpret_cast<unsigned int *>(&displayWindow[2])); swap4(reinterpret_cast<unsigned int *>(&displayWindow[3])); } } else if (attrName.compare("lineOrder") == 0) { int order; memcpy(&order, &data.at(0), sizeof(int)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&order)); } lineOrder = (unsigned char)order; } else if (attrName.compare("pixelAspectRatio") == 0) { memcpy(&pixelAspectRatio, &data.at(0), sizeof(float)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&pixelAspectRatio)); } } else if (attrName.compare("screenWindowCenter") == 0) { memcpy(&screenWindowCenter[0], &data.at(0), sizeof(float)); memcpy(&screenWindowCenter[1], &data.at(4), sizeof(float)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[0])); swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[1])); } } else if (attrName.compare("screenWindowWidth") == 0) { memcpy(&screenWindowWidth, &data.at(0), sizeof(float)); if (IsBigEndian()) { swap4(reinterpret_cast<unsigned int *>(&screenWindowWidth)); } } else { // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES) if (numCustomAttributes < TINYEXR_MAX_ATTRIBUTES) { EXRAttribute attrib; attrib.name = strdup(attrName.c_str()); attrib.type = strdup(attrType.c_str()); attrib.size = data.size(); attrib.value = (unsigned char *)malloc(data.size()); memcpy((char *)attrib.value, &data.at(0), data.size()); customAttribs.push_back(attrib); } } marker = marker_next; } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(numChannels >= 1); int dataWidth = dw - dx + 1; int dataHeight = dh - dy + 1; { exrImage->channel_names = (const char **)malloc(sizeof(const char *) * numChannels); for (int c = 0; c < numChannels; c++) { #ifdef _WIN32 exrImage->channel_names[c] = _strdup(channels[c].name.c_str()); #else exrImage->channel_names[c] = 
strdup(channels[c].name.c_str()); #endif } exrImage->num_channels = numChannels; exrImage->width = dataWidth; exrImage->height = dataHeight; exrImage->pixel_aspect_ratio = pixelAspectRatio; exrImage->screen_window_center[0] = screenWindowCenter[0]; exrImage->screen_window_center[1] = screenWindowCenter[1]; exrImage->screen_window_width = screenWindowWidth; exrImage->display_window[0] = displayWindow[0]; exrImage->display_window[1] = displayWindow[1]; exrImage->display_window[2] = displayWindow[2]; exrImage->display_window[3] = displayWindow[3]; exrImage->data_window[0] = dx; exrImage->data_window[1] = dy; exrImage->data_window[2] = dw; exrImage->data_window[3] = dh; exrImage->line_order = lineOrder; exrImage->compression = compressionType; exrImage->pixel_types = (int *)malloc(sizeof(int) * numChannels); for (int c = 0; c < numChannels; c++) { exrImage->pixel_types[c] = channels[c].pixelType; } // Initially fill with values of `pixel-types` exrImage->requested_pixel_types = (int *)malloc(sizeof(int) * numChannels); for (int c = 0; c < numChannels; c++) { exrImage->requested_pixel_types[c] = channels[c].pixelType; } } if (numCustomAttributes > 0) { assert(customAttribs.size() < TINYEXR_MAX_ATTRIBUTES); exrImage->num_custom_attributes = numCustomAttributes; for (int i = 0; i < (int)customAttribs.size(); i++) { exrImage->custom_attributes[i] = customAttribs[i]; } } return 0; // OK } #ifdef _MSC_VER #pragma warning(pop) #endif #endif #endif // __TINYEXR_H__
// ==== dataset.h (LightGBM) — boundary between concatenated files ====
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_DATASET_H_ #define LIGHTGBM_DATASET_H_ #include <LightGBM/config.h> #include <LightGBM/feature_group.h> #include <LightGBM/meta.h> #include <LightGBM/utils/openmp_wrapper.h> #include <LightGBM/utils/random.h> #include <LightGBM/utils/text_reader.h> #include <string> #include <functional> #include <memory> #include <mutex> #include <unordered_set> #include <utility> #include <vector> namespace LightGBM { /*! \brief forward declaration */ class DatasetLoader; /*! * \brief This class is used to store some meta(non-feature) data for training data, * e.g. labels, weights, initial scores, query level informations. * * Some details: * 1. Label, used for training. * 2. Weights, weighs of records, optional * 3. Query Boundaries, necessary for lambdarank. * The documents of i-th query is in [ query_boundaries[i], query_boundaries[i+1] ) * 4. Query Weights, auto calculate by weights and query_boundaries(if both of them are existed) * the weight for i-th query is sum(query_boundaries[i] , .., query_boundaries[i+1]) / (query_boundaries[i + 1] - query_boundaries[i+1]) * 5. Initial score. optional. if existing, the model will boost from this score, otherwise will start from 0. */ class Metadata { public: /*! * \brief Null constructor */ Metadata(); /*! * \brief Initialization will load query level informations, since it is need for sampling data * \param data_filename Filename of data */ void Init(const char* data_filename); /*! * \brief init as subset * \param metadata Filename of data * \param used_indices * \param num_used_indices */ void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices); /*! * \brief Initial with binary memory * \param memory Pointer to memory */ void LoadFromMemory(const void* memory); /*! \brief Destructor */ ~Metadata(); /*! 
* \brief Initial work, will allocate space for label, weight(if exists) and query(if exists) * \param num_data Number of training data * \param weight_idx Index of weight column, < 0 means doesn't exists * \param query_idx Index of query id column, < 0 means doesn't exists */ void Init(data_size_t num_data, int weight_idx, int query_idx); /*! * \brief Partition label by used indices * \param used_indices Indices of local used */ void PartitionLabel(const std::vector<data_size_t>& used_indices); /*! * \brief Partition meta data according to local used indices if need * \param num_all_data Number of total training data, including other machines' data on parallel learning * \param used_data_indices Indices of local used training data */ void CheckOrPartition(data_size_t num_all_data, const std::vector<data_size_t>& used_data_indices); void SetLabel(const label_t* label, data_size_t len); void SetWeights(const label_t* weights, data_size_t len); void SetQuery(const data_size_t* query, data_size_t len); /*! * \brief Set initial scores * \param init_score Initial scores, this class will manage memory for init_score. */ void SetInitScore(const double* init_score, data_size_t len); /*! * \brief Save binary data to file * \param file File want to write */ void SaveBinaryToFile(const VirtualFileWriter* writer) const; /*! * \brief Get sizes in byte of this object */ size_t SizesInByte() const; /*! * \brief Get pointer of label * \return Pointer of label */ inline const label_t* label() const { return label_.data(); } /*! * \brief Set label for one record * \param idx Index of this record * \param value Label value of this record */ inline void SetLabelAt(data_size_t idx, label_t value) { label_[idx] = value; } /*! * \brief Set Weight for one record * \param idx Index of this record * \param value Weight value of this record */ inline void SetWeightAt(data_size_t idx, label_t value) { weights_[idx] = value; } /*! 
* \brief Set Query Id for one record * \param idx Index of this record * \param value Query Id value of this record */ inline void SetQueryAt(data_size_t idx, data_size_t value) { queries_[idx] = static_cast<data_size_t>(value); } /*! * \brief Get weights, if not exists, will return nullptr * \return Pointer of weights */ inline const label_t* weights() const { if (!weights_.empty()) { return weights_.data(); } else { return nullptr; } } /*! * \brief Get data boundaries on queries, if not exists, will return nullptr * we assume data will order by query, * the interval of [query_boundaris[i], query_boundaris[i+1]) * is the data indices for query i. * \return Pointer of data boundaries on queries */ inline const data_size_t* query_boundaries() const { if (!query_boundaries_.empty()) { return query_boundaries_.data(); } else { return nullptr; } } /*! * \brief Get Number of queries * \return Number of queries */ inline data_size_t num_queries() const { return num_queries_; } /*! * \brief Get weights for queries, if not exists, will return nullptr * \return Pointer of weights for queries */ inline const label_t* query_weights() const { if (!query_weights_.empty()) { return query_weights_.data(); } else { return nullptr; } } /*! * \brief Get initial scores, if not exists, will return nullptr * \return Pointer of initial scores */ inline const double* init_score() const { if (!init_score_.empty()) { return init_score_.data(); } else { return nullptr; } } /*! * \brief Get size of initial scores */ inline int64_t num_init_score() const { return num_init_score_; } /*! \brief Disable copy */ Metadata& operator=(const Metadata&) = delete; /*! \brief Disable copy */ Metadata(const Metadata&) = delete; private: /*! \brief Load initial scores from file */ void LoadInitialScore(); /*! \brief Load wights from file */ void LoadWeights(); /*! \brief Load query boundaries from file */ void LoadQueryBoundaries(); /*! \brief Load query wights */ void LoadQueryWeights(); /*! 
\brief Filename of current data */ std::string data_filename_; /*! \brief Number of data */ data_size_t num_data_; /*! \brief Number of weights, used to check correct weight file */ data_size_t num_weights_; /*! \brief Label data */ std::vector<label_t> label_; /*! \brief Weights data */ std::vector<label_t> weights_; /*! \brief Query boundaries */ std::vector<data_size_t> query_boundaries_; /*! \brief Query weights */ std::vector<label_t> query_weights_; /*! \brief Number of querys */ data_size_t num_queries_; /*! \brief Number of Initial score, used to check correct weight file */ int64_t num_init_score_; /*! \brief Initial score */ std::vector<double> init_score_; /*! \brief Queries data */ std::vector<data_size_t> queries_; /*! \brief mutex for threading safe call */ std::mutex mutex_; bool weight_load_from_file_; bool query_load_from_file_; bool init_score_load_from_file_; }; /*! \brief Interface for Parser */ class Parser { public: /*! \brief virtual destructor */ virtual ~Parser() {} /*! * \brief Parse one line with label * \param str One line record, string format, should end with '\0' * \param out_features Output columns, store in (column_idx, values) * \param out_label Label will store to this if exists */ virtual void ParseOneLine(const char* str, std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0; virtual int NumFeatures() const = 0; /*! 
* \brief Create an object of parser, will auto choose the format depend on file
 * \param filename One Filename of data
 * \param header Presumably: whether the first line of the file is a header
 *        row — confirm against DatasetLoader callers
 * \param num_features Pass num_features of this data file if you know, <=0 means don't know
 * \param label_idx index of label column
 * \return Object of parser
 */
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};

/*! \brief Shared mutable state used while constructing histograms during
 *  training (multi-value bin, per-thread histogram buffer, move tables). */
struct TrainingShareStates {
  /*! \brief Set from OMP_NUM_THREADS() in SetMultiValBin */
  int num_threads = 0;
  bool is_colwise = true;
  /*! \brief When set, TempBuf()/HistMove() are active (see below) */
  bool is_use_subcol = false;
  bool is_use_subrow = false;
  bool is_subrow_copied = false;
  bool is_constant_hessian = true;
  const data_size_t* bagging_use_indices;
  data_size_t bagging_indices_cnt;
  /*! \brief num_bin of multi_val_bin rounded up to a multiple of kAlignedSize */
  int num_bin_aligned;
  std::unique_ptr<MultiValBin> multi_val_bin;
  std::unique_ptr<MultiValBin> multi_val_bin_subset;
  /*! \brief Per-entry copy tables consumed by HistMove() */
  std::vector<uint32_t> hist_move_src;
  std::vector<uint32_t> hist_move_dest;
  std::vector<uint32_t> hist_move_size;
  /*! \brief Aligned histogram scratch; sized 2 * num_bin_aligned per thread */
  std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>> hist_buf;

  /*! \brief Take ownership of `bin` (may be nullptr) and grow hist_buf to
   *  2 * num_bin_aligned entries per thread. Never shrinks the buffer. */
  void SetMultiValBin(MultiValBin* bin) {
    num_threads = OMP_NUM_THREADS();
    if (bin == nullptr) {
      return;
    }
    multi_val_bin.reset(bin);
    // Round up to the alignment unit so each thread's slice stays aligned.
    num_bin_aligned =
        (bin->num_bin() + kAlignedSize - 1) / kAlignedSize * kAlignedSize;
    size_t new_size = static_cast<size_t>(num_bin_aligned) * 2 * num_threads;
    if (new_size > hist_buf.size()) {
      hist_buf.resize(static_cast<size_t>(num_bin_aligned) * 2 * num_threads);
    }
  }

  /*! \brief Scratch area at the tail of hist_buf (last 2 * num_bin_aligned
   *  entries); returns nullptr unless is_use_subcol is set. */
  hist_t* TempBuf() {
    if (!is_use_subcol) {
      return nullptr;
    }
    return hist_buf.data() + hist_buf.size() - num_bin_aligned * 2;
  }

  /*! \brief Copy histogram slices from `src` to `dest` according to the
   *  hist_move_src/dest/size tables; no-op unless is_use_subcol is set. */
  void HistMove(const hist_t* src, hist_t* dest) {
    if (!is_use_subcol) {
      return;
    }
#pragma omp parallel for schedule(static)
    for (int i = 0; i < static_cast<int>(hist_move_src.size()); ++i) {
      std::copy_n(src + hist_move_src[i], hist_move_size[i],
                  dest + hist_move_dest[i]);
    }
  }
};

/*!
\brief The main class of data set, * which are used to training or validation */ class Dataset { public: friend DatasetLoader; LIGHTGBM_EXPORT Dataset(); LIGHTGBM_EXPORT Dataset(data_size_t num_data); void Construct( std::vector<std::unique_ptr<BinMapper>>* bin_mappers, int num_total_features, const std::vector<std::vector<double>>& forced_bins, int** sample_non_zero_indices, double** sample_values, const int* num_per_col, int num_sample_col, size_t total_sample_cnt, const Config& io_config); /*! \brief Destructor */ LIGHTGBM_EXPORT ~Dataset(); LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const { if (num_features_ != other.num_features_) { return false; } if (num_total_features_ != other.num_total_features_) { return false; } if (label_idx_ != other.label_idx_) { return false; } for (int i = 0; i < num_features_; ++i) { if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) { return false; } } return true; } inline void FinishOneRow(int tid, data_size_t row_idx, const std::vector<bool>& is_feature_added) { if (is_finish_load_) { return; } for (auto fidx : feature_need_push_zeros_) { if (is_feature_added[fidx]) { continue; } const int group = feature2group_[fidx]; const int sub_feature = feature2subfeature_[fidx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, 0.0f); } } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) { if (is_finish_load_) { return; } for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) { int feature_idx = used_feature_map_[i]; if (feature_idx >= 0) { const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]); } } } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) { if (is_finish_load_) { return; } std::vector<bool> 
is_feature_added(num_features_, false); for (auto& inner_data : feature_values) { if (inner_data.first >= num_total_features_) { continue; } int feature_idx = used_feature_map_[inner_data.first]; if (feature_idx >= 0) { is_feature_added[feature_idx] = true; const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second); } } FinishOneRow(tid, row_idx, is_feature_added); } inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) { feature_groups_[group]->PushData(tid, sub_feature, row_idx, value); } inline int RealFeatureIndex(int fidx) const { return real_feature_idx_[fidx]; } inline int InnerFeatureIndex(int col_idx) const { return used_feature_map_[col_idx]; } inline int Feature2Group(int feature_idx) const { return feature2group_[feature_idx]; } inline int Feture2SubFeature(int feature_idx) const { return feature2subfeature_[feature_idx]; } inline uint64_t GroupBinBoundary(int group_idx) const { return group_bin_boundaries_[group_idx]; } inline uint64_t NumTotalBin() const { return group_bin_boundaries_.back(); } inline std::vector<int> ValidFeatureIndices() const { std::vector<int> ret; for (int i = 0; i < num_total_features_; ++i) { if (used_feature_map_[i] >= 0) { ret.push_back(i); } } return ret; } void ReSize(data_size_t num_data); void CopySubrow(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data); MultiValBin* GetMultiBinFromSparseFeatures() const; MultiValBin* GetMultiBinFromAllFeatures() const; TrainingShareStates* GetShareStates( score_t* gradients, score_t* hessians, const std::vector<int8_t>& is_feature_used, bool is_constant_hessian, bool force_colwise, bool force_rowwise) const; LIGHTGBM_EXPORT void FinishLoad(); LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element); LIGHTGBM_EXPORT 
bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element); LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element); LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr); LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr); LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr); /*! * \brief Save current dataset into binary file, will save to "filename.bin" */ LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename); LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename); LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset); LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset); void InitTrain(const std::vector<int8_t>& is_feature_used, TrainingShareStates* share_state) const; template <bool USE_INDICES, bool USE_HESSIAN> void ConstructHistogramsInner(const std::vector<int8_t>& is_feature_used, const data_size_t* data_indices, data_size_t num_data, const score_t* gradients, const score_t* hessians, score_t* ordered_gradients, score_t* ordered_hessians, TrainingShareStates* share_state, hist_t* hist_data) const; template <bool USE_INDICES, bool ORDERED> void ConstructHistogramsMultiVal(const data_size_t* data_indices, data_size_t num_data, const score_t* gradients, const score_t* hessians, TrainingShareStates* share_state, hist_t* hist_data) const; inline void ConstructHistograms( const std::vector<int8_t>& is_feature_used, const data_size_t* data_indices, data_size_t num_data, const score_t* gradients, const score_t* hessians, score_t* ordered_gradients, score_t* ordered_hessians, TrainingShareStates* share_state, hist_t* hist_data) const { if (num_data <= 0) { return; } bool use_indices = data_indices != nullptr && (num_data < num_data_); if (share_state->is_constant_hessian) { if (use_indices) { 
ConstructHistogramsInner<true, false>(
            is_feature_used, data_indices, num_data, gradients, hessians,
            ordered_gradients, ordered_hessians, share_state, hist_data);
      } else {
        ConstructHistogramsInner<false, false>(
            is_feature_used, data_indices, num_data, gradients, hessians,
            ordered_gradients, ordered_hessians, share_state, hist_data);
      }
    } else {
      if (use_indices) {
        ConstructHistogramsInner<true, true>(
            is_feature_used, data_indices, num_data, gradients, hessians,
            ordered_gradients, ordered_hessians, share_state, hist_data);
      } else {
        ConstructHistogramsInner<false, true>(
            is_feature_used, data_indices, num_data, gradients, hessians,
            ordered_gradients, ordered_hessians, share_state, hist_data);
      }
    }
  }

  /*! \brief Re-anchor a feature's histogram from the totals (implemented elsewhere) */
  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian,
                    hist_t* data) const;

  /*!
   * \brief Partition data_indices by a threshold on one feature.
   * Delegates to the owning group's Split; writes indices <= threshold into
   * lte_indices and the rest into gt_indices, returning the lte count.
   */
  inline data_size_t Split(int feature, const uint32_t* threshold,
                           int num_threshold, bool default_left,
                           const data_size_t* data_indices, data_size_t cnt,
                           data_size_t* lte_indices,
                           data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(
        sub_feature, threshold, num_threshold, default_left, data_indices, cnt,
        lte_indices, gt_indices);
  }

  /*! \brief Bin offset of feature i: 1 for the first sub-feature of a group, else 0 */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  /*! \brief Number of bins of feature i (from its bin mapper) */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  /*! \brief Total number of bins in one feature group */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  /*! \brief Bin mapper of feature i (owned by its group; do not free) */
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  /*! \brief Raw bin storage of one feature group (owned by the group) */
  inline const Bin* FeatureGroupBin(int group) const {
    return
feature_groups_[group]->bin_data_.get();
  }

  /*! \brief Iterator over the bins of a single feature (sub-feature resolved within its group) */
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  /*! \brief Iterator over all bins of one feature group */
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  /*! \brief Whether group i stores multi-value bins */
  inline bool IsMultiGroup(int i) const {
    return feature_groups_[i]->is_multi_val_;
  }

  /*! \brief Convert a bin threshold of feature i back to a real feature value */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }

  /*!
   * \brief Get meta data pointer
   * \return Pointer of meta data
   */
  inline const Metadata& metadata() const { return metadata_; }

  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_; }

  /*! \brief Get Number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*!
\brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const {
    return feature_names_;
  }

  /*!
   * \brief Replace the stored feature names.
   * Fatal if the count differs from num_total_features_; names must contain
   * only JSON-safe characters and be unique; spaces are rewritten to '_'.
   */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    std::unordered_set<std::string> feature_name_set;
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name : feature_names_) {
      // check json
      if (!Common::CheckAllowedJSON(feature_name)) {
        Log::Fatal("Do not support special JSON characters in feature name.");
      }
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
      if (feature_name_set.count(feature_name) > 0) {
        Log::Fatal("Feature (%s) appears more than one time.", feature_name.c_str());
      }
      feature_name_set.insert(feature_name);
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  /*! \brief One bin-info string per total feature; "none" for features not in use */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; ++i) {
      int fidx = used_feature_map_[i];
      if (fidx < 0) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info_string());
      }
    }
    return bufs;
  }

  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;

  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;

  /*! \brief Append features from another dataset into this one */
  void AddFeaturesFrom(Dataset* other);

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*!
\brief Number of used features */
  int num_features_;
  /*! \brief Number of total features (used and unused) */
  int num_total_features_;
  /*! \brief Number of total data */
  data_size_t num_data_;
  /*! \brief Store some label level data */
  Metadata metadata_;
  /*! \brief Index of label column */
  int label_idx_ = 0;
  /*! \brief Store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Token string of the binary file format (presumably a header marker — verify in the .cpp) */
  static const char* binary_file_token;
  /*! \brief Number of feature groups */
  int num_groups_;
  /*! \brief Map used (inner) feature index -> original feature index */
  std::vector<int> real_feature_idx_;
  /*! \brief Map feature index -> owning group */
  std::vector<int> feature2group_;
  /*! \brief Map feature index -> sub-feature position inside its group */
  std::vector<int> feature2subfeature_;
  /*! \brief Cumulative bin boundaries per group; back() is the total bin count */
  std::vector<uint64_t> group_bin_boundaries_;
  /*! \brief First feature index of each group */
  std::vector<int> group_feature_start_;
  /*! \brief Feature count of each group */
  std::vector<int> group_feature_cnt_;
  bool is_finish_load_;
  int max_bin_;
  std::vector<int32_t> max_bin_by_feature_;
  /*! \brief User-forced bin bounds per feature */
  std::vector<std::vector<double>> forced_bin_bounds_;
  int bin_construct_sample_cnt_;
  int min_data_in_bin_;
  bool use_missing_;
  bool zero_as_missing_;
  /*! \brief Features that require explicit zero pushes (see PushOneRow above) — TODO confirm */
  std::vector<int> feature_need_push_zeros_;
};

}  // namespace LightGBM

#endif  // LightGBM_DATA_H_
SingleBodyLink.c
/* Minimal OpenMP scoping exercise: a file-scope x, a block-local x that
 * shadows it inside the first "single" region, and a bare constant
 * expression statement in the second. The program computes nothing. */
int x;

int main()
{
#pragma omp single
    {
        int x; /* shadows the global inside this single region */
    }
#pragma omp single
    {
        13; /* expression statement with no effect, kept intentionally */
    }
}
convolution_sgemm_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack8to1_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { #if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ if (ncnn::cpu_support_x86_avx512_vnni()) { extern void im2col_sgemm_pack8to1_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_pack8to1_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt); return; } #endif #if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ if (ncnn::cpu_support_x86_avx_vnni()) { extern void im2col_sgemm_pack8to1_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_pack8to1_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt); return; } #endif // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; #if __AVX2__ if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #else 
if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #endif { #if __AVX2__ int remain_size_start = 0; int nn_size = size >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; int64_t* tmpptr = tmp.channel(i / 4); for (int q = 0; q < inch; q++) { const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { __m256i _v = _mm256_loadu_si256((const __m256i*)img0); _mm256_storeu_si256((__m256i*)tmpptr, _v); tmpptr += 4; img0 += size; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __AVX2__ int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #else int64_t* tmpptr = tmp.channel(i / 2); #endif for (int q = 0; q < inch; q++) { const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { __m128i _v = _mm_loadu_si128((const __m128i*)img0); _mm_storeu_si128((__m128i*)tmpptr, _v); tmpptr += 2; img0 += size; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __AVX2__ int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #else int64_t* tmpptr = tmp.channel(i / 2 + i % 2); #endif for (int q = 0; q < inch; q++) { const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* outptr0 = 
top_blob.channel(p); int* outptr1 = top_blob.channel(p + 1); int* outptr2 = top_blob.channel(p + 2); int* outptr3 = top_blob.channel(p + 3); int i = 0; #if __AVX2__ for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 4); const signed char* kptr0 = kernel.channel(p / 4); int nn = inch * maxk; // inch always > 0 __m256i _sum00_11 = _mm256_setzero_si256(); __m256i _sum10_01 = _mm256_setzero_si256(); __m256i _sum02_13 = _mm256_setzero_si256(); __m256i _sum12_03 = _mm256_setzero_si256(); __m256i _sum04_15 = _mm256_setzero_si256(); __m256i _sum14_05 = _mm256_setzero_si256(); __m256i _sum06_17 = _mm256_setzero_si256(); __m256i _sum16_07 = _mm256_setzero_si256(); int j = 0; for (; j < nn; j++) { __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16)); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23); __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78); #if __AVXVNNI__ || __AVX512VNNI__ _sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16); _sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16); _sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16); _sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16); #else __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16); __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16); __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16); __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16); __m256i _sl02_13 = _mm256_mullo_epi16(_val01_16, _w23_16); __m256i _sh02_13 = _mm256_mulhi_epi16(_val01_16, _w23_16); __m256i _sl12_03 = _mm256_mullo_epi16(_val10_16, _w23_16); __m256i _sh12_03 = _mm256_mulhi_epi16(_val10_16, _w23_16); _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpacklo_epi16(_sl00_11, _sh00_11)); _sum10_01 = _mm256_add_epi32(_sum10_01, 
_mm256_unpacklo_epi16(_sl10_01, _sh10_01)); _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpacklo_epi16(_sl02_13, _sh02_13)); _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpacklo_epi16(_sl12_03, _sh12_03)); _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpackhi_epi16(_sl00_11, _sh00_11)); _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpackhi_epi16(_sl10_01, _sh10_01)); _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpackhi_epi16(_sl02_13, _sh02_13)); _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpackhi_epi16(_sl12_03, _sh12_03)); #endif __m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16)); __m256i _val23_16 = _mm256_cvtepi8_epi16(_val23); __m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78); #if __AVXVNNI__ || __AVX512VNNI__ _sum04_15 = _mm256_dpwssd_epi32(_sum04_15, _val23_16, _w01_16); _sum14_05 = _mm256_dpwssd_epi32(_sum14_05, _val32_16, _w01_16); _sum06_17 = _mm256_dpwssd_epi32(_sum06_17, _val23_16, _w23_16); _sum16_07 = _mm256_dpwssd_epi32(_sum16_07, _val32_16, _w23_16); #else __m256i _sl04_15 = _mm256_mullo_epi16(_val23_16, _w01_16); __m256i _sh04_15 = _mm256_mulhi_epi16(_val23_16, _w01_16); __m256i _sl14_05 = _mm256_mullo_epi16(_val32_16, _w01_16); __m256i _sh14_05 = _mm256_mulhi_epi16(_val32_16, _w01_16); __m256i _sl06_17 = _mm256_mullo_epi16(_val23_16, _w23_16); __m256i _sh06_17 = _mm256_mulhi_epi16(_val23_16, _w23_16); __m256i _sl16_07 = _mm256_mullo_epi16(_val32_16, _w23_16); __m256i _sh16_07 = _mm256_mulhi_epi16(_val32_16, _w23_16); _sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_unpacklo_epi16(_sl04_15, _sh04_15)); _sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_unpacklo_epi16(_sl14_05, _sh14_05)); _sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_unpacklo_epi16(_sl06_17, _sh06_17)); _sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_unpacklo_epi16(_sl16_07, _sh16_07)); _sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_unpackhi_epi16(_sl04_15, _sh04_15)); _sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_unpackhi_epi16(_sl14_05, 
_sh14_05)); _sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_unpackhi_epi16(_sl06_17, _sh06_17)); _sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_unpackhi_epi16(_sl16_07, _sh16_07)); #endif tmpptr += 32; kptr0 += 32; } // transpose 4x8 { __m256i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01); _tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03); _tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01); _tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03); _sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } { __m256i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum04_15, _sum14_05); _tmp1 = _mm256_unpacklo_epi32(_sum06_17, _sum16_07); _tmp2 = _mm256_unpackhi_epi32(_sum04_15, _sum14_05); _tmp3 = _mm256_unpackhi_epi32(_sum06_17, _sum16_07); _sum04_15 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum14_05 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum06_17 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum16_07 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } _sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01); _sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03); _sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13); _sum04_15 = _mm256_add_epi32(_sum04_15, _sum14_05); _sum06_17 = _mm256_add_epi32(_sum06_17, _sum16_07); _sum04_15 = _mm256_add_epi32(_sum04_15, _sum06_17); __m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0); _sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask); _sum04_15 = _mm256_permutevar8x32_epi32(_sum04_15, _perm_mask); int sum[16]; _mm256_storeu_si256((__m256i*)sum, _sum00_11); _mm256_storeu_si256((__m256i*)(sum + 8), _sum04_15); outptr0[0] = sum[0]; outptr1[0] = sum[1]; outptr2[0] = sum[2]; outptr3[0] = sum[3]; outptr0[1] = sum[4]; outptr1[1] = sum[5]; outptr2[1] = sum[6]; outptr3[1] = sum[7]; outptr0[2] = sum[8]; outptr1[2] = sum[9]; outptr2[2] = sum[10]; outptr3[2] = sum[11]; 
outptr0[3] = sum[12]; outptr1[3] = sum[13]; outptr2[3] = sum[14]; outptr3[3] = sum[15]; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } #endif for (; i + 1 < size; i += 2) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = inch * maxk; // inch always > 0 #if __AVX2__ __m256i _sum00_11 = _mm256_setzero_si256(); __m256i _sum10_01 = _mm256_setzero_si256(); __m256i _sum02_13 = _mm256_setzero_si256(); __m256i _sum12_03 = _mm256_setzero_si256(); #else __m128i _sum00 = _mm_setzero_si128(); __m128i _sum01 = _mm_setzero_si128(); __m128i _sum02 = _mm_setzero_si128(); __m128i _sum03 = _mm_setzero_si128(); __m128i _sum10 = _mm_setzero_si128(); __m128i _sum11 = _mm_setzero_si128(); __m128i _sum12 = _mm_setzero_si128(); __m128i _sum13 = _mm_setzero_si128(); #endif int j = 0; for (; j < nn; j++) { #if __AVX2__ __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16)); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23); __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78); #if __AVXVNNI__ || __AVX512VNNI__ _sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16); _sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16); _sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16); _sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16); #else __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16); __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16); __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16); __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16); __m256i _sl02_13 = _mm256_mullo_epi16(_val01_16, _w23_16); __m256i _sh02_13 = _mm256_mulhi_epi16(_val01_16, _w23_16); __m256i 
_sl12_03 = _mm256_mullo_epi16(_val10_16, _w23_16); __m256i _sh12_03 = _mm256_mulhi_epi16(_val10_16, _w23_16); _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpacklo_epi16(_sl00_11, _sh00_11)); _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpacklo_epi16(_sl10_01, _sh10_01)); _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpacklo_epi16(_sl02_13, _sh02_13)); _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpacklo_epi16(_sl12_03, _sh12_03)); _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpackhi_epi16(_sl00_11, _sh00_11)); _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpackhi_epi16(_sl10_01, _sh10_01)); _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpackhi_epi16(_sl02_13, _sh02_13)); _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpackhi_epi16(_sl12_03, _sh12_03)); #endif #else __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01); __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01); __m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16)); __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01); __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23); __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01); __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01); __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23); __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23); __m128i _sl00 = _mm_mullo_epi16(_val0, _w0); __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl01 = _mm_mullo_epi16(_val0, _w1); __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1); __m128i _sl02 = _mm_mullo_epi16(_val0, _w2); __m128i _sh02 = _mm_mulhi_epi16(_val0, _w2); __m128i _sl03 = _mm_mullo_epi16(_val0, _w3); __m128i _sh03 = _mm_mulhi_epi16(_val0, _w3); __m128i _sl10 = _mm_mullo_epi16(_val1, _w0); __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0); __m128i _sl11 = _mm_mullo_epi16(_val1, _w1); __m128i _sh11 = _mm_mulhi_epi16(_val1, _w1); 
__m128i _sl12 = _mm_mullo_epi16(_val1, _w2); __m128i _sh12 = _mm_mulhi_epi16(_val1, _w2); __m128i _sl13 = _mm_mullo_epi16(_val1, _w3); __m128i _sh13 = _mm_mulhi_epi16(_val1, _w3); _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00)); _sum01 = _mm_add_epi32(_sum01, _mm_unpacklo_epi16(_sl01, _sh01)); _sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl02, _sh02)); _sum03 = _mm_add_epi32(_sum03, _mm_unpacklo_epi16(_sl03, _sh03)); _sum00 = _mm_add_epi32(_sum00, _mm_unpackhi_epi16(_sl00, _sh00)); _sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl01, _sh01)); _sum02 = _mm_add_epi32(_sum02, _mm_unpackhi_epi16(_sl02, _sh02)); _sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl03, _sh03)); _sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10)); _sum11 = _mm_add_epi32(_sum11, _mm_unpacklo_epi16(_sl11, _sh11)); _sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl12, _sh12)); _sum13 = _mm_add_epi32(_sum13, _mm_unpacklo_epi16(_sl13, _sh13)); _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl10, _sh10)); _sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl11, _sh11)); _sum12 = _mm_add_epi32(_sum12, _mm_unpackhi_epi16(_sl12, _sh12)); _sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl13, _sh13)); #endif tmpptr += 16; kptr0 += 32; } #if __AVX2__ // transpose 4x8 { __m256i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01); _tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03); _tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01); _tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03); _sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } _sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01); _sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03); _sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13); __m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0); _sum00_11 = 
_mm256_permutevar8x32_epi32(_sum00_11, _perm_mask); int sum[8]; _mm256_storeu_si256((__m256i*)sum, _sum00_11); #else // transpose 4x4 { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum00, _sum01); _tmp1 = _mm_unpacklo_epi32(_sum02, _sum03); _tmp2 = _mm_unpackhi_epi32(_sum00, _sum01); _tmp3 = _mm_unpackhi_epi32(_sum02, _sum03); _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3); } { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum10, _sum11); _tmp1 = _mm_unpacklo_epi32(_sum12, _sum13); _tmp2 = _mm_unpackhi_epi32(_sum10, _sum11); _tmp3 = _mm_unpackhi_epi32(_sum12, _sum13); _sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3); } _sum00 = _mm_add_epi32(_sum00, _sum01); _sum02 = _mm_add_epi32(_sum02, _sum03); _sum10 = _mm_add_epi32(_sum10, _sum11); _sum12 = _mm_add_epi32(_sum12, _sum13); _sum00 = _mm_add_epi32(_sum00, _sum02); _sum10 = _mm_add_epi32(_sum10, _sum12); int sum[8]; _mm_storeu_si128((__m128i*)sum, _sum00); _mm_storeu_si128((__m128i*)(sum + 4), _sum10); #endif outptr0[0] = sum[0]; outptr1[0] = sum[1]; outptr2[0] = sum[2]; outptr3[0] = sum[3]; outptr0[1] = sum[4]; outptr1[1] = sum[5]; outptr2[1] = sum[6]; outptr3[1] = sum[7]; outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; } for (; i < size; i++) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = inch * maxk; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = 
_mm_setzero_si128(); #endif int j = 0; for (; j < nn; j++) { #if __AVX2__ __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr); _val = _mm_cvtepi8_epi16(_val); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16)); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23); __m256i _valval = _mm256_inserti128_si256(_mm256_castsi128_si256(_val), _val, 1); #if __AVXVNNI__ || __AVX512VNNI__ _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _valval, _w01_16); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _valval, _w23_16); #else __m256i _sl0_1 = _mm256_mullo_epi16(_valval, _w01_16); __m256i _sh0_1 = _mm256_mulhi_epi16(_valval, _w01_16); __m256i _sl2_3 = _mm256_mullo_epi16(_valval, _w23_16); __m256i _sh2_3 = _mm256_mulhi_epi16(_valval, _w23_16); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl0_1, _sh0_1)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl2_3, _sh2_3)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl0_1, _sh0_1)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl2_3, _sh2_3)); #endif #else __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr); #if __SSE4_1__ _val = _mm_cvtepi8_epi16(_val); #else _val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val)); #endif __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16)); __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01); __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23); __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01); __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01); __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23); __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23); __m128i _sl0 = _mm_mullo_epi16(_val, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val, _w0); __m128i _sl1 = _mm_mullo_epi16(_val, _w1); __m128i _sh1 = _mm_mulhi_epi16(_val, _w1); __m128i _sl2 = _mm_mullo_epi16(_val, _w2); __m128i _sh2 = 
_mm_mulhi_epi16(_val, _w2); __m128i _sl3 = _mm_mullo_epi16(_val, _w3); __m128i _sh3 = _mm_mulhi_epi16(_val, _w3); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpacklo_epi16(_sl1, _sh1)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl2, _sh2)); _sum3 = _mm_add_epi32(_sum3, _mm_unpacklo_epi16(_sl3, _sh3)); _sum0 = _mm_add_epi32(_sum0, _mm_unpackhi_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1)); _sum2 = _mm_add_epi32(_sum2, _mm_unpackhi_epi16(_sl2, _sh2)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3)); #endif tmpptr += 8; kptr0 += 32; } #if __AVX2__ __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0); __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1); __m128i _sum2 = _mm256_extracti128_si256(_sum2_3, 0); __m128i _sum3 = _mm256_extracti128_si256(_sum2_3, 1); #endif // transpose 4x4 { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum0, _sum1); _tmp1 = _mm_unpacklo_epi32(_sum2, _sum3); _tmp2 = _mm_unpackhi_epi32(_sum0, _sum1); _tmp3 = _mm_unpackhi_epi32(_sum2, _sum3); _sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3); } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); _sum0 = _mm_add_epi32(_sum0, _sum2); int sum[4]; _mm_storeu_si128((__m128i*)sum, _sum0); outptr0[0] = sum[0]; outptr1[0] = sum[1]; outptr2[0] = sum[2]; outptr3[0] = sum[3]; outptr0 += 1; outptr1 += 1; outptr2 += 1; outptr3 += 1; } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __AVX2__ for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 4); const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = inch * maxk; // inch always > 0 __m256i _sum0_2 = 
_mm256_setzero_si256(); __m256i _sum1_3 = _mm256_setzero_si256(); __m256i _sum4_6 = _mm256_setzero_si256(); __m256i _sum5_7 = _mm256_setzero_si256(); int j = 0; for (; j < nn; j++) { __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16)); __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01); __m256i _val23_16 = _mm256_cvtepi8_epi16(_val23); __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); _w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0)); __m256i _sl00_10 = _mm256_mullo_epi16(_val01_16, _w01_16); __m256i _sh00_10 = _mm256_mulhi_epi16(_val01_16, _w01_16); __m256i _sl20_30 = _mm256_mullo_epi16(_val23_16, _w01_16); __m256i _sh20_30 = _mm256_mulhi_epi16(_val23_16, _w01_16); _sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl00_10, _sh00_10)); _sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl00_10, _sh00_10)); _sum4_6 = _mm256_add_epi32(_sum4_6, _mm256_unpacklo_epi16(_sl20_30, _sh20_30)); _sum5_7 = _mm256_add_epi32(_sum5_7, _mm256_unpackhi_epi16(_sl20_30, _sh20_30)); tmpptr += 32; kptr0 += 8; } _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); _sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7); __m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0); __m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1); __m128i _sum4 = _mm256_extracti128_si256(_sum4_6, 1); __m128i _sum6 = _mm256_extracti128_si256(_sum4_6, 1); outptr0[0] = _mm_reduce_add_epi32(_sum0); outptr0[1] = _mm_reduce_add_epi32(_sum2); outptr0[2] = _mm_reduce_add_epi32(_sum4); outptr0[3] = _mm_reduce_add_epi32(_sum6); outptr0 += 4; } #endif for (; i + 1 < size; i += 2) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = inch * maxk; // inch always > 0 #if __AVX2__ __m256i _sum0_2 = _mm256_setzero_si256(); __m256i _sum1_3 = 
_mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); #endif int j = 0; for (; j < nn; j++) { #if __AVX2__ __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01); __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); _w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0)); __m256i _sl00_10 = _mm256_mullo_epi16(_val01_16, _w01_16); __m256i _sh00_10 = _mm256_mulhi_epi16(_val01_16, _w01_16); _sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl00_10, _sh00_10)); _sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl00_10, _sh00_10)); #else __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01); __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01); __m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01); __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0); #if __SSE4_1__ __m128i _w0 = _mm_cvtepi8_epi16(_w01); #else __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01); __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01); #endif __m128i _sl00 = _mm_mullo_epi16(_val0, _w0); __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl10 = _mm_mullo_epi16(_val1, _w0); __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10)); #endif tmpptr += 16; kptr0 += 8; } #if __AVX2__ _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); __m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0); __m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1); #else _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); #endif outptr0[0] = 
_mm_reduce_add_epi32(_sum0);
outptr0[1] = _mm_reduce_add_epi32(_sum2);
outptr0 += 2;
}
// remaining single output pixel per iteration
for (; i < size; i++)
{
#if __AVX2__
    const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
    const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
    const signed char* kptr0 = kernel.channel(p / 4 + p % 4);

    int nn = inch * maxk; // inch always > 0

    __m128i _sum0 = _mm_setzero_si128();
    __m128i _sum1 = _mm_setzero_si128();

    int j = 0;
    for (; j < nn; j++)
    {
        // 8 int8 input values -> sign-extended to 16-bit
        __m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
        __m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
        __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
        __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
        // 8 int8 kernel values -> sign-extended to 16-bit
        __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
        __m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
        __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
        __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif
        // widen 16-bit products to 32-bit via mullo/mulhi + unpack, accumulate
        __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
        __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);

        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));

        tmpptr += 8;
        kptr0 += 8;
    }

    _sum0 = _mm_add_epi32(_sum0, _sum1);

    outptr0[0] = _mm_reduce_add_epi32(_sum0);
    outptr0 += 1;
}
}
}

// Repack the int8 convolution kernel for the pack8to1 sgemm path.
// src layout: maxk-inch-outch; dst layout: 8a-4b-maxk-inch/8a-outch/4b,
// i.e. weights for blocks of 4 output channels x 8 input channels are
// interleaved so the sgemm inner loop reads 8 weights contiguously.
static void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    // one dst channel holds 4 output channels (32 bytes per k) when outch >= 4;
    // leftover output channels get one dst channel each (8 bytes per k)
    if (outch >= 4)
        kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
    else
        kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);

    int q = 0;
    // blocks of 4 output channels
    for (; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
    // TODO unroll 2
    // leftover output channels, one at a time
    for (; q < outch; q++)
    {
        signed char* g00 = kernel_tm.channel(q / 4 + q % 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 8; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);

                    g00[0] = k00[k];

                    g00++;
                }
            }
        }
    }
}

// im2col + sgemm driver for the pack8to1 int8 path: expands bottom_blob
// (elemsize 8, elempack 8 -- each element is 8 int8 values copied as one
// int64) into bottom_im2col (size x maxk x inch), then calls the sgemm
// kernel above to produce top_blob.
static void convolution_im2col_sgemm_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // elements skipped between two consecutive output rows of the image
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            int64_t* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // one packed element (8 x int8) is copied as a single int64
                    const int64_t* sptr = img.row<const int64_t>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to1_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
GB_unaryop__minv_int64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_int64_int16
// op(A') function: GB_tran__minv_int64_int16

// C type: int64_t
// A type: int16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)

// input (A) entry type
#define GB_ATYPE \
    int16_t

// output (C) entry type
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise MINV (integer multiplicative inverse, GB_IMINV_SIGNED): each
// int16 entry of Ax is cast to int64 and inverted into Cx.  One entry per
// loop iteration; the loop is embarrassingly parallel.
GrB_Info GB_unop__minv_int64_int16
(
    int64_t *restrict Cx,           // output array, anz entries
    const int16_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Same operator as above, fused with a transpose; the actual work is done by
// the template included from GB_unaryop_transpose.c (phase 2 of 2).
GrB_Info GB_tran__minv_int64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kmp_csupport.c
/* * kmp_csupport.c -- kfront linkage support for OpenMP. */ //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// #include "omp.h" /* extern "C" declarations of user-visible routines */ #include "kmp.h" #include "kmp_i18n.h" #include "kmp_itt.h" #include "kmp_lock.h" #include "kmp_error.h" #include "kmp_stats.h" #if OMPT_SUPPORT #include "ompt-internal.h" #include "ompt-specific.h" #endif #define MAX_MESSAGE 512 /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ /* flags will be used in future, e.g., to implement */ /* openmp_strict library restrictions */ /*! * @ingroup STARTUP_SHUTDOWN * @param loc in source location information * @param flags in for future use (currently ignored) * * Initialize the runtime library. This call is optional; if it is not made then * it will be implicitly called by attempts to use other library functions. * */ void __kmpc_begin(ident_t *loc, kmp_int32 flags) { // By default __kmp_ignore_mppbeg() returns TRUE. if (__kmp_ignore_mppbeg() == FALSE) { __kmp_internal_begin(); KC_TRACE( 10, ("__kmpc_begin: called\n" ) ); } } /*! * @ingroup STARTUP_SHUTDOWN * @param loc source location information * * Shutdown the runtime library. This is also optional, and even if called will not * do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero. */ void __kmpc_end(ident_t *loc) { // By default, __kmp_ignore_mppend() returns TRUE which makes __kmpc_end() call no-op. // However, this can be overridden with KMP_IGNORE_MPPEND environment variable. 
// If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend() returns FALSE and __kmpc_end() // will unregister this root (it can cause library shut down). if (__kmp_ignore_mppend() == FALSE) { KC_TRACE( 10, ("__kmpc_end: called\n" ) ); KA_TRACE( 30, ("__kmpc_end\n" )); __kmp_internal_end_thread( -1 ); } } /*! @ingroup THREAD_STATES @param loc Source location information. @return The global thread index of the active thread. This function can be called in any context. If the runtime has ony been entered at the outermost level from a single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is that which would be returned by omp_get_thread_num() in the outermost active parallel construct. (Or zero if there is no active parallel construct, since the master thread is necessarily thread zero). If multiple non-OpenMP threads all enter an OpenMP construct then this will be a unique thread identifier among all the threads created by the OpenMP runtime (but the value cannote be defined in terms of OpenMP thread ids returned by omp_get_thread_num()). */ kmp_int32 __kmpc_global_thread_num(ident_t *loc) { kmp_int32 gtid = __kmp_entry_gtid(); KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) ); return gtid; } /*! @ingroup THREAD_STATES @param loc Source location information. @return The number of threads under control of the OpenMP<sup>*</sup> runtime This function can be called in any context. It returns the total number of threads under the control of the OpenMP runtime. That is not a number that can be determined by any OpenMP standard calls, since the library may be called from more than one non-OpenMP thread, and this reflects the total over all such calls. Similarly the runtime maintains underlying threads even when they are not active (since the cost of creating and destroying OS threads is high), this call counts all such threads even if they are not waiting for work. 
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );

    return TCR_4(__kmp_nth);
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );
    return __kmp_tid_from_gtid( __kmp_entry_gtid() );
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );

    return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
}

/*!
 * @ingroup DEPRECATED
 * @param loc location description
 *
 * This function need not be called. It always returns TRUE.
 */
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG

    return TRUE;

#else

    const char *semi2;
    const char *semi3;
    int line_no;

    if (__kmp_par_range == 0) {
        return TRUE;
    }
    semi2 = loc->psource;
    if (semi2 == NULL) {
        return TRUE;
    }
    // psource is parsed below as ';'-separated fields; semi2 is advanced to
    // the second ';' (end of the filename field), semi3 to the third.
    semi2 = strchr(semi2, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2 + 1, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    if (__kmp_par_range_filename[0]) {
        const char *name = semi2 - 1;
        while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
            name--;
        }
        if ((*name == '/') || (*name == ';')) {
            name++;
        }
        if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
            return __kmp_par_range < 0;
        }
    }
    semi3 = strchr(semi2 + 1, ';');
    if (__kmp_par_range_routine[0]) {
        if ((semi3 != NULL) && (semi3 > semi2)
          && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
            return __kmp_par_range < 0;
        }
    }
    // BUGFIX: semi3 may be NULL when psource has no third ';' (and the
    // routine-name check above only dereferences it when non-NULL). The
    // original passed semi3 + 1 to KMP_SSCANF unconditionally, reading
    // through an invalid pointer. Treat a malformed psource like the other
    // early-out cases above.
    if (semi3 == NULL) {
        return TRUE;
    }
    if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
        if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
            return __kmp_par_range > 0;
        }
        return __kmp_par_range < 0;
    }
    return TRUE;

#endif /* KMP_DEBUG */

}
/*! @ingroup THREAD_STATES @param loc Source location information. @return 1 if this thread is executing inside an active parallel region, zero if not. */ kmp_int32 __kmpc_in_parallel( ident_t *loc ) { return __kmp_entry_thread() -> th.th_root -> r.r_active; } /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number @param num_threads number of threads requested for this parallel construct Set the number of threads to be used by the next fork spawned by this thread. This call is only required if the parallel construct has a `num_threads` clause. */ void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads ) { KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n", global_tid, num_threads ) ); __kmp_push_num_threads( loc, global_tid, num_threads ); } void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid ) { KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) ); /* the num_threads are automatically popped */ } #if OMP_40_ENABLED void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind ) { KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n", global_tid, proc_bind ) ); __kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind ); } #endif /* OMP_40_ENABLED */ /*! @ingroup PARALLEL @param loc source location information @param argc total number of arguments in the ellipsis @param microtask pointer to callback routine consisting of outlined parallel construct @param ... pointers to shared variables that aren't global Do the actual fork and call the microtask in the relevant number of threads. */ void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) 
{ int gtid = __kmp_entry_gtid(); #if (KMP_STATS_ENABLED) int inParallel = __kmpc_in_parallel(loc); if (inParallel) { KMP_COUNT_BLOCK(OMP_NESTED_PARALLEL); } else { KMP_COUNT_BLOCK(OMP_PARALLEL); } #endif // maybe to save thr_state is enough here { va_list ap; va_start( ap, microtask ); #if OMPT_SUPPORT int tid = __kmp_tid_from_gtid( gtid ); kmp_info_t *master_th = __kmp_threads[ gtid ]; kmp_team_t *parent_team = master_th->th.th_team; if (ompt_enabled) { parent_team->t.t_implicit_task_taskdata[tid]. ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0); } #endif #if INCLUDE_SSC_MARKS SSC_MARK_FORKING(); #endif __kmp_fork_call( loc, gtid, fork_context_intel, argc, #if OMPT_SUPPORT VOLATILE_CAST(void *) microtask, // "unwrapped" task #endif VOLATILE_CAST(microtask_t) microtask, // "wrapped" task VOLATILE_CAST(launch_t) __kmp_invoke_task_func, /* TODO: revert workaround for Intel(R) 64 tracker #96 */ #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX &ap #else ap #endif ); #if INCLUDE_SSC_MARKS SSC_MARK_JOINING(); #endif __kmp_join_call( loc, gtid #if OMPT_SUPPORT , fork_context_intel #endif ); va_end( ap ); #if OMPT_SUPPORT if (ompt_enabled) { parent_team->t.t_implicit_task_taskdata[tid]. ompt_task_info.frame.reenter_runtime_frame = 0; } #endif } } #if OMP_40_ENABLED /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number @param num_teams number of teams requested for the teams construct @param num_threads number of threads per team requested for the teams construct Set the number of teams to be used by the teams construct. This call is only required if the teams construct has a `num_teams` clause or a `thread_limit` clause (or both). 
*/ void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads ) { KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n", global_tid, num_teams, num_threads ) ); __kmp_push_num_teams( loc, global_tid, num_teams, num_threads ); } /*! @ingroup PARALLEL @param loc source location information @param argc total number of arguments in the ellipsis @param microtask pointer to callback routine consisting of outlined teams construct @param ... pointers to shared variables that aren't global Do the actual fork and call the microtask in the relevant number of threads. */ void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) { int gtid = __kmp_entry_gtid(); kmp_info_t *this_thr = __kmp_threads[ gtid ]; va_list ap; va_start( ap, microtask ); KMP_COUNT_BLOCK(OMP_TEAMS); // remember teams entry point and nesting level this_thr->th.th_teams_microtask = microtask; this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host #if OMPT_SUPPORT kmp_team_t *parent_team = this_thr->th.th_team; int tid = __kmp_tid_from_gtid( gtid ); if (ompt_enabled) { parent_team->t.t_implicit_task_taskdata[tid]. 
ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0); } #endif // check if __kmpc_push_num_teams called, set default number of teams otherwise if ( this_thr->th.th_teams_size.nteams == 0 ) { __kmp_push_num_teams( loc, gtid, 0, 0 ); } KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1); KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1); KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1); __kmp_fork_call( loc, gtid, fork_context_intel, argc, #if OMPT_SUPPORT VOLATILE_CAST(void *) microtask, // "unwrapped" task #endif VOLATILE_CAST(microtask_t) __kmp_teams_master, // "wrapped" task VOLATILE_CAST(launch_t) __kmp_invoke_teams_master, #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX &ap #else ap #endif ); __kmp_join_call( loc, gtid #if OMPT_SUPPORT , fork_context_intel #endif ); #if OMPT_SUPPORT if (ompt_enabled) { parent_team->t.t_implicit_task_taskdata[tid]. ompt_task_info.frame.reenter_runtime_frame = NULL; } #endif this_thr->th.th_teams_microtask = NULL; this_thr->th.th_teams_level = 0; *(kmp_int64*)(&this_thr->th.th_teams_size) = 0L; va_end( ap ); } #endif /* OMP_40_ENABLED */ // // I don't think this function should ever have been exported. // The __kmpc_ prefix was misapplied. I'm fairly certain that no generated // openmp code ever called it, but it's been exported from the RTL for so // long that I'm afraid to remove the definition. // int __kmpc_invoke_task_func( int gtid ) { return __kmp_invoke_task_func( gtid ); } /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number Enter a serialized parallel construct. This interface is used to handle a conditional parallel region, like this, @code #pragma omp parallel if (condition) @endcode when the condition is false. 
*/ void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { __kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with * kmp_fork_call since the tasks to be done are similar in each case. */ } /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number Leave a serialized parallel construct. */ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { kmp_internal_control_t *top; kmp_info_t *this_thr; kmp_team_t *serial_team; KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) ); /* skip all this code for autopar serialized loops since it results in unacceptable overhead */ if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) ) return; // Not autopar code if( ! TCR_4( __kmp_init_parallel ) ) __kmp_parallel_initialize(); this_thr = __kmp_threads[ global_tid ]; serial_team = this_thr->th.th_serial_team; #if OMP_45_ENABLED kmp_task_team_t * task_team = this_thr->th.th_task_team; // we need to wait for the proxy tasks before finishing the thread if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks ) __kmp_task_team_wait(this_thr, serial_team USE_ITT_BUILD_ARG(NULL) ); // is an ITT object needed here? 
#endif KMP_MB(); KMP_DEBUG_ASSERT( serial_team ); KMP_ASSERT( serial_team -> t.t_serialized ); KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team ); KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team ); KMP_DEBUG_ASSERT( serial_team -> t.t_threads ); KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr ); /* If necessary, pop the internal control stack values and replace the team values */ top = serial_team -> t.t_control_stack_top; if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) { copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top ); serial_team -> t.t_control_stack_top = top -> next; __kmp_free(top); } //if( serial_team -> t.t_serialized > 1 ) serial_team -> t.t_level--; /* pop dispatch buffers stack */ KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer); { dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer; serial_team->t.t_dispatch->th_disp_buffer = serial_team->t.t_dispatch->th_disp_buffer->next; __kmp_free( disp_buffer ); } -- serial_team -> t.t_serialized; if ( serial_team -> t.t_serialized == 0 ) { /* return to the parallel section */ #if KMP_ARCH_X86 || KMP_ARCH_X86_64 if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) { __kmp_clear_x87_fpu_status_word(); __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word ); __kmp_load_mxcsr( &serial_team->t.t_mxcsr ); } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ this_thr -> th.th_team = serial_team -> t.t_parent; this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid; /* restore values cached in the thread */ this_thr -> th.th_team_nproc = serial_team -> t.t_parent -> t.t_nproc; /* JPH */ this_thr -> th.th_team_master = serial_team -> t.t_parent -> t.t_threads[0]; /* JPH */ this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized; /* TODO the below shouldn't need to be adjusted for serialized teams */ this_thr -> th.th_dispatch = 
& this_thr -> th.th_team -> t.t_dispatch[ serial_team -> t.t_master_tid ]; __kmp_pop_current_task_from_thread( this_thr ); KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 ); this_thr -> th.th_current_task -> td_flags.executing = 1; if ( __kmp_tasking_mode != tskm_immediate_exec ) { // Copy the task team from the new child / old parent team to the thread. this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]; KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n", global_tid, this_thr -> th.th_task_team, this_thr -> th.th_team ) ); } } else { if ( __kmp_tasking_mode != tskm_immediate_exec ) { KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n", global_tid, serial_team, serial_team -> t.t_serialized ) ); } } if ( __kmp_env_consistency_check ) __kmp_pop_parallel( global_tid, NULL ); } /*! @ingroup SYNCHRONIZATION @param loc source location information. Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though depending on the memory ordering convention obeyed by the compiler even that may not be necessary). */ void __kmpc_flush(ident_t *loc) { KC_TRACE( 10, ("__kmpc_flush: called\n" ) ); /* need explicit __mf() here since use volatile instead in library */ KMP_MB(); /* Flush all pending memory write invalidates. */ #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 ) #if KMP_MIC // fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used. // We shouldn't need it, though, since the ABI rules require that // * If the compiler generates NGO stores it also generates the fence // * If users hand-code NGO stores they should insert the fence // therefore no incomplete unordered stores should be visible. #else // C74404 // This is to address non-temporal store instructions (sfence needed). // The clflush instruction is addressed either (mfence needed). 
// Probably the non-temporal load monvtdqa instruction should also be addressed. // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2. if ( ! __kmp_cpuinfo.initialized ) { __kmp_query_cpuid( & __kmp_cpuinfo ); }; // if if ( ! __kmp_cpuinfo.sse2 ) { // CPU cannot execute SSE2 instructions. } else { #if KMP_COMPILER_ICC _mm_mfence(); #elif KMP_COMPILER_MSVC MemoryBarrier(); #else __sync_synchronize(); #endif // KMP_COMPILER_ICC }; // if #endif // KMP_MIC #elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64) // Nothing to see here move along #elif KMP_ARCH_PPC64 // Nothing needed here (we have a real MB above). #if KMP_OS_CNK // The flushing thread needs to yield here; this prevents a // busy-waiting thread from saturating the pipeline. flush is // often used in loops like this: // while (!flag) { // #pragma omp flush(flag) // } // and adding the yield here is good for at least a 10x speedup // when running >2 threads per core (on the NAS LU benchmark). __kmp_yield(TRUE); #endif #else #error Unknown or unsupported architecture #endif } /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /*! @ingroup SYNCHRONIZATION @param loc source location information @param global_tid thread id. Execute a barrier. */ void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid) { KMP_COUNT_BLOCK(OMP_BARRIER); KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) ); if (! TCR_4(__kmp_init_parallel)) __kmp_parallel_initialize(); if ( __kmp_env_consistency_check ) { if ( loc == 0 ) { KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user? }; // if __kmp_check_barrier( global_tid, ct_barrier, loc ); } __kmp_threads[ global_tid ]->th.th_ident = loc; // TODO: explicit barrier_wait_id: // this function is called when 'barrier' directive is present or // implicit barrier at the end of a worksharing construct. 
// 1) better to add a per-thread barrier counter to a thread data structure // 2) set to 0 when a new team is created // 4) no sync is required __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL ); } /* The BARRIER for a MASTER section is always explicit */ /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . @return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise. */ kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid) { int status = 0; KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) ); if( ! TCR_4( __kmp_init_parallel ) ) __kmp_parallel_initialize(); if( KMP_MASTER_GTID( global_tid )) { KMP_COUNT_BLOCK(OMP_MASTER); KMP_PUSH_PARTITIONED_TIMER(OMP_master); status = 1; } #if OMPT_SUPPORT && OMPT_TRACE if (status) { if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_master_begin)) { kmp_info_t *this_thr = __kmp_threads[ global_tid ]; kmp_team_t *team = this_thr -> th.th_team; int tid = __kmp_tid_from_gtid( global_tid ); ompt_callbacks.ompt_callback(ompt_event_master_begin)( team->t.ompt_team_info.parallel_id, team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id); } } #endif if ( __kmp_env_consistency_check ) { #if KMP_USE_DYNAMIC_LOCK if (status) __kmp_push_sync( global_tid, ct_master, loc, NULL, 0 ); else __kmp_check_sync( global_tid, ct_master, loc, NULL, 0 ); #else if (status) __kmp_push_sync( global_tid, ct_master, loc, NULL ); else __kmp_check_sync( global_tid, ct_master, loc, NULL ); #endif } return status; } /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . Mark the end of a <tt>master</tt> region. This should only be called by the thread that executes the <tt>master</tt> region. 
*/ void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid) { KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) ); KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid )); KMP_POP_PARTITIONED_TIMER(); #if OMPT_SUPPORT && OMPT_TRACE kmp_info_t *this_thr = __kmp_threads[ global_tid ]; kmp_team_t *team = this_thr -> th.th_team; if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_master_end)) { int tid = __kmp_tid_from_gtid( global_tid ); ompt_callbacks.ompt_callback(ompt_event_master_end)( team->t.ompt_team_info.parallel_id, team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id); } #endif if ( __kmp_env_consistency_check ) { if( global_tid < 0 ) KMP_WARNING( ThreadIdentInvalid ); if( KMP_MASTER_GTID( global_tid )) __kmp_pop_sync( global_tid, ct_master, loc ); } } /*! @ingroup WORK_SHARING @param loc source location information. @param gtid global thread number. Start execution of an <tt>ordered</tt> construct. */ void __kmpc_ordered( ident_t * loc, kmp_int32 gtid ) { int cid = 0; kmp_info_t *th; KMP_DEBUG_ASSERT( __kmp_init_serial ); KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid )); if (! 
TCR_4(__kmp_init_parallel)) __kmp_parallel_initialize(); #if USE_ITT_BUILD __kmp_itt_ordered_prep( gtid ); // TODO: ordered_wait_id #endif /* USE_ITT_BUILD */ th = __kmp_threads[ gtid ]; #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled) { /* OMPT state update */ th->th.ompt_thread_info.wait_id = (uint64_t) loc; th->th.ompt_thread_info.state = ompt_state_wait_ordered; /* OMPT event callback */ if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) { ompt_callbacks.ompt_callback(ompt_event_wait_ordered)( th->th.ompt_thread_info.wait_id); } } #endif if ( th -> th.th_dispatch -> th_deo_fcn != 0 ) (*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc ); else __kmp_parallel_deo( & gtid, & cid, loc ); #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled) { /* OMPT state update */ th->th.ompt_thread_info.state = ompt_state_work_parallel; th->th.ompt_thread_info.wait_id = 0; /* OMPT event callback */ if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) { ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)( th->th.ompt_thread_info.wait_id); } } #endif #if USE_ITT_BUILD __kmp_itt_ordered_start( gtid ); #endif /* USE_ITT_BUILD */ } /*! @ingroup WORK_SHARING @param loc source location information. @param gtid global thread number. End execution of an <tt>ordered</tt> construct. 
*/ void __kmpc_end_ordered( ident_t * loc, kmp_int32 gtid ) { int cid = 0; kmp_info_t *th; KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) ); #if USE_ITT_BUILD __kmp_itt_ordered_end( gtid ); // TODO: ordered_wait_id #endif /* USE_ITT_BUILD */ th = __kmp_threads[ gtid ]; if ( th -> th.th_dispatch -> th_dxo_fcn != 0 ) (*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc ); else __kmp_parallel_dxo( & gtid, & cid, loc ); #if OMPT_SUPPORT && OMPT_BLAME if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_release_ordered)) { ompt_callbacks.ompt_callback(ompt_event_release_ordered)( th->th.ompt_thread_info.wait_id); } #endif } #if KMP_USE_DYNAMIC_LOCK static __forceinline void __kmp_init_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_indirect_locktag_t tag) { // Pointer to the allocated indirect lock is written to crit, while indexing is ignored. void *idx; kmp_indirect_lock_t **lck; lck = (kmp_indirect_lock_t **)crit; kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag); KMP_I_LOCK_FUNC(ilk, init)(ilk->lock); KMP_SET_I_LOCK_LOCATION(ilk, loc); KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section); KA_TRACE(20, ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag)); #if USE_ITT_BUILD __kmp_itt_critical_creating(ilk->lock, loc); #endif int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk); if (status == 0) { #if USE_ITT_BUILD __kmp_itt_critical_destroyed(ilk->lock); #endif // We don't really need to destroy the unclaimed lock here since it will be cleaned up at program exit. //KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx); } KMP_DEBUG_ASSERT(*lck != NULL); } // Fast-path acquire tas lock #define KMP_ACQUIRE_TAS_LOCK(lock, gtid) { \ kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \ if (l->lk.poll != KMP_LOCK_FREE(tas) || \ ! 
KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \ kmp_uint32 spins; \ KMP_FSYNC_PREPARE(l); \ KMP_INIT_YIELD(spins); \ if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \ KMP_YIELD(TRUE); \ } else { \ KMP_YIELD_SPIN(spins); \ } \ kmp_backoff_t backoff = __kmp_spin_backoff_params; \ while (l->lk.poll != KMP_LOCK_FREE(tas) || \ ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \ __kmp_spin_backoff(&backoff); \ if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \ KMP_YIELD(TRUE); \ } else { \ KMP_YIELD_SPIN(spins); \ } \ } \ } \ KMP_FSYNC_ACQUIRED(l); \ } // Fast-path test tas lock #define KMP_TEST_TAS_LOCK(lock, gtid, rc) { \ kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \ rc = l->lk.poll == KMP_LOCK_FREE(tas) && \ KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas)); \ } // Fast-path release tas lock #define KMP_RELEASE_TAS_LOCK(lock, gtid) { \ TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas)); \ KMP_MB(); \ } #if KMP_USE_FUTEX # include <unistd.h> # include <sys/syscall.h> # ifndef FUTEX_WAIT # define FUTEX_WAIT 0 # endif # ifndef FUTEX_WAKE # define FUTEX_WAKE 1 # endif // Fast-path acquire futex lock #define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ kmp_int32 gtid_code = (gtid+1) << 1; \ KMP_MB(); \ KMP_FSYNC_PREPARE(ftx); \ kmp_int32 poll_val; \ while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), \ KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { \ kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; \ if (!cond) { \ if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) { \ continue; \ } \ poll_val |= KMP_LOCK_BUSY(1, futex); \ } \ kmp_int32 rc; \ if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) { \ 
continue; \ } \ gtid_code |= 1; \ } \ KMP_FSYNC_ACQUIRED(ftx); \ } // Fast-path test futex lock #define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1 << 1, futex))) { \ KMP_FSYNC_ACQUIRED(ftx); \ rc = TRUE; \ } else { \ rc = FALSE; \ } \ } // Fast-path release futex lock #define KMP_RELEASE_FUTEX_LOCK(lock, gtid) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ KMP_MB(); \ KMP_FSYNC_RELEASING(ftx); \ kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex)); \ if (KMP_LOCK_STRIP(poll_val) & 1) { \ syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0); \ } \ KMP_MB(); \ KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); \ } #endif // KMP_USE_FUTEX #else // KMP_USE_DYNAMIC_LOCK static kmp_user_lock_p __kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid ) { kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit; // // Because of the double-check, the following load // doesn't need to be volatile. // kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp ); if ( lck == NULL ) { void * idx; // Allocate & initialize the lock. // Remember allocated locks in table in order to free them in __kmp_cleanup() lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section ); __kmp_init_user_lock_with_checks( lck ); __kmp_set_user_lock_location( lck, loc ); #if USE_ITT_BUILD __kmp_itt_critical_creating( lck ); // __kmp_itt_critical_creating() should be called *before* the first usage of underlying // lock. It is the only place where we can guarantee it. There are chances the lock will // destroyed with no usage, but it is not a problem, because this is not real event seen // by user but rather setting name for object (lock). See more details in kmp_itt.h. 
#endif /* USE_ITT_BUILD */ // // Use a cmpxchg instruction to slam the start of the critical // section with the lock pointer. If another thread beat us // to it, deallocate the lock, and use the lock that the other // thread allocated. // int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck ); if ( status == 0 ) { // Deallocate the lock and reload the value. #if USE_ITT_BUILD __kmp_itt_critical_destroyed( lck ); // Let ITT know the lock is destroyed and the same memory location may be reused for // another purpose. #endif /* USE_ITT_BUILD */ __kmp_destroy_user_lock_with_checks( lck ); __kmp_user_lock_free( &idx, gtid, lck ); lck = (kmp_user_lock_p)TCR_PTR( *lck_pp ); KMP_DEBUG_ASSERT( lck != NULL ); } } return lck; } #endif // KMP_USE_DYNAMIC_LOCK /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . @param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or some other suitably unique value. Enter code protected by a `critical` construct. This function blocks until the executing thread can enter the critical section. 
 */
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
{
#if KMP_USE_DYNAMIC_LOCK
    // Dynamic-lock builds route through the hinted entry point with no hint.
    __kmpc_critical_with_hint(loc, global_tid, crit, omp_lock_hint_none);
#else
    KMP_COUNT_BLOCK(OMP_CRITICAL);
    KMP_TIME_PARTITIONED_BLOCK(OMP_critical_wait);  /* Time spent waiting to enter the critical section */
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

    //TODO: add THR_OVHD_STATE

    KMP_CHECK_USER_LOCK_INIT();

    // Small lock kinds (tas, futex) fit directly inside the kmp_critical_name
    // word; use 'crit' itself as the lock.  Larger kinds go through the
    // critical-section pointer table.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else { // ticket, queuing or drdpa
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    /* since the critical directive binds to all threads, not just
     * the current team we have to check this even if we are in a
     * serialized team */
    /* also, even if we are the uber thread, we still have to conduct the lock,
     * as we have to contend with sibling threads */

#if USE_ITT_BUILD
    __kmp_itt_critical_acquiring( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
__kmp_acquire_user_lock_with_checks( lck, global_tid ); #if USE_ITT_BUILD __kmp_itt_critical_acquired( lck ); #endif /* USE_ITT_BUILD */ KMP_START_EXPLICIT_TIMER(OMP_critical); KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid )); #endif // KMP_USE_DYNAMIC_LOCK } #if KMP_USE_DYNAMIC_LOCK // Converts the given hint to an internal lock implementation static __forceinline kmp_dyna_lockseq_t __kmp_map_hint_to_lock(uintptr_t hint) { #if KMP_USE_TSX # define KMP_TSX_LOCK(seq) lockseq_##seq #else # define KMP_TSX_LOCK(seq) __kmp_user_lock_seq #endif #if KMP_ARCH_X86 || KMP_ARCH_X86_64 # define KMP_CPUINFO_RTM (__kmp_cpuinfo.rtm) #else # define KMP_CPUINFO_RTM 0 #endif // Hints that do not require further logic if (hint & kmp_lock_hint_hle) return KMP_TSX_LOCK(hle); if (hint & kmp_lock_hint_rtm) return KMP_CPUINFO_RTM ? KMP_TSX_LOCK(rtm): __kmp_user_lock_seq; if (hint & kmp_lock_hint_adaptive) return KMP_CPUINFO_RTM ? KMP_TSX_LOCK(adaptive): __kmp_user_lock_seq; // Rule out conflicting hints first by returning the default lock if ((hint & omp_lock_hint_contended) && (hint & omp_lock_hint_uncontended)) return __kmp_user_lock_seq; if ((hint & omp_lock_hint_speculative) && (hint & omp_lock_hint_nonspeculative)) return __kmp_user_lock_seq; // Do not even consider speculation when it appears to be contended if (hint & omp_lock_hint_contended) return lockseq_queuing; // Uncontended lock without speculation if ((hint & omp_lock_hint_uncontended) && !(hint & omp_lock_hint_speculative)) return lockseq_tas; // HLE lock for speculation if (hint & omp_lock_hint_speculative) return KMP_TSX_LOCK(hle); return __kmp_user_lock_seq; } /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number. @param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or some other suitably unique value. @param hint the lock hint. Enter code protected by a `critical` construct with a hint. 
The hint value is used to suggest a lock implementation. This function blocks until the executing thread can enter the critical section unless the hint suggests use of speculative execution and the hardware supports it. */ void __kmpc_critical_with_hint( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit, uintptr_t hint ) { KMP_COUNT_BLOCK(OMP_CRITICAL); kmp_user_lock_p lck; KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) ); kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit; // Check if it is initialized. if (*lk == 0) { kmp_dyna_lockseq_t lckseq = __kmp_map_hint_to_lock(hint); if (KMP_IS_D_LOCK(lckseq)) { KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(lckseq)); } else { __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq)); } } // Branch for accessing the actual lock object and set operation. This branching is inevitable since // this lock initialization does not follow the normal dispatch path (lock table is not used). if (KMP_EXTRACT_D_TAG(lk) != 0) { lck = (kmp_user_lock_p)lk; if (__kmp_env_consistency_check) { __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint)); } # if USE_ITT_BUILD __kmp_itt_critical_acquiring(lck); # endif # if KMP_USE_INLINED_TAS if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) { KMP_ACQUIRE_TAS_LOCK(lck, global_tid); } else # elif KMP_USE_INLINED_FUTEX if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) { KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid); } else # endif { KMP_D_LOCK_FUNC(lk, set)(lk, global_tid); } } else { kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk); lck = ilk->lock; if (__kmp_env_consistency_check) { __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint)); } # if USE_ITT_BUILD __kmp_itt_critical_acquiring(lck); # endif KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid); } #if USE_ITT_BUILD __kmp_itt_critical_acquired( lck ); #endif /* USE_ITT_BUILD */ 
KMP_PUSH_PARTITIONED_TIMER(OMP_critical); KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid )); } // __kmpc_critical_with_hint #endif // KMP_USE_DYNAMIC_LOCK /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . @param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or some other suitably unique value. Leave a critical section, releasing any lock that was held during its execution. */ void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit) { kmp_user_lock_p lck; KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid )); #if KMP_USE_DYNAMIC_LOCK if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) { lck = (kmp_user_lock_p)crit; KMP_ASSERT(lck != NULL); if (__kmp_env_consistency_check) { __kmp_pop_sync(global_tid, ct_critical, loc); } # if USE_ITT_BUILD __kmp_itt_critical_releasing( lck ); # endif # if KMP_USE_INLINED_TAS if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) { KMP_RELEASE_TAS_LOCK(lck, global_tid); } else # elif KMP_USE_INLINED_FUTEX if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) { KMP_RELEASE_FUTEX_LOCK(lck, global_tid); } else # endif { KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid); } } else { kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit)); KMP_ASSERT(ilk != NULL); lck = ilk->lock; if (__kmp_env_consistency_check) { __kmp_pop_sync(global_tid, ct_critical, loc); } # if USE_ITT_BUILD __kmp_itt_critical_releasing( lck ); # endif KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid); } #else // KMP_USE_DYNAMIC_LOCK if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) { lck = (kmp_user_lock_p)crit; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) { lck = (kmp_user_lock_p)crit; } #endif else 
{ // ticket, queuing or drdpa lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit)); } KMP_ASSERT(lck != NULL); if ( __kmp_env_consistency_check ) __kmp_pop_sync( global_tid, ct_critical, loc ); #if USE_ITT_BUILD __kmp_itt_critical_releasing( lck ); #endif /* USE_ITT_BUILD */ // Value of 'crit' should be good for using as a critical_id of the critical section directive. __kmp_release_user_lock_with_checks( lck, global_tid ); #if OMPT_SUPPORT && OMPT_BLAME if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_release_critical)) { ompt_callbacks.ompt_callback(ompt_event_release_critical)( (uint64_t) lck); } #endif #endif // KMP_USE_DYNAMIC_LOCK KMP_POP_PARTITIONED_TIMER(); KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid )); } /*! @ingroup SYNCHRONIZATION @param loc source location information @param global_tid thread id. @return one if the thread should execute the master block, zero otherwise Start execution of a combined barrier and master. The barrier is executed inside this function. */ kmp_int32 __kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid) { int status; KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) ); if (! TCR_4(__kmp_init_parallel)) __kmp_parallel_initialize(); if ( __kmp_env_consistency_check ) __kmp_check_barrier( global_tid, ct_barrier, loc ); #if USE_ITT_NOTIFY __kmp_threads[global_tid]->th.th_ident = loc; #endif status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL ); return (status != 0) ? 0 : 1; } /*! @ingroup SYNCHRONIZATION @param loc source location information @param global_tid thread id. Complete the execution of a combined barrier and master. This function should only be called at the completion of the <tt>master</tt> code. Other threads will still be waiting at the barrier and this call releases them. 
 */
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));

    // Release the threads still waiting at the barrier half of the
    // combined barrier-and-master construct.
    __kmp_end_split_barrier ( bs_plain_barrier, global_tid );
}

/*!
@ingroup SYNCHRONIZATION
@param loc  source location information
@param global_tid  thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the barrier is completed
before the master code runs, so the other threads do not wait for the
master thread to finish.
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
    kmp_int32 ret;

    KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }
        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    // Full (releasing) barrier first; the master check below does not hold
    // the other threads back.
    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    ret = __kmpc_master (loc, global_tid);

    if ( __kmp_env_consistency_check ) {
        /*  there's no __kmpc_end_master called; so the (stats) */
        /*  actions of __kmpc_end_master are done here          */

        if ( global_tid < 0 ) {
            KMP_WARNING( ThreadIdentInvalid );
        }
        if (ret) {
            /* only one thread should do the pop since only */
            /* one did the push (see __kmpc_master())       */

            __kmp_pop_sync( global_tid, ct_master, loc );
        }
    }

    return (ret);
}

/* The BARRIER for a SINGLE process section is always explicit   */
/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number
@return One if this thread should execute the single construct, zero otherwise.

Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls, rather the compiler should
introduce an explicit barrier if it is required.
 */
kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
    kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );

    if (rc) {
        // We are going to execute the single statement, so we should count it.
        KMP_COUNT_BLOCK(OMP_SINGLE);
        KMP_PUSH_PARTITIONED_TIMER(OMP_single);
    }

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr        = __kmp_threads[ global_tid ];
    kmp_team_t *team            = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if (ompt_enabled) {
        if (rc) {
            // This thread won the single region: report "in block begin".
            if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id,
                    team->t.ompt_team_info.microtask);
            }
        } else {
            // This thread skips the block; mark it as waiting for OMPT.
            if (ompt_callbacks.ompt_callback(ompt_event_single_others_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_others_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
            }
            this_thr->th.ompt_thread_info.state = ompt_state_wait_single;
        }
    }
#endif

    return rc;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number

Mark the end of a <tt>single</tt> construct.  This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_exit_single( global_tid );
    KMP_POP_PARTITIONED_TIMER();

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr        = __kmp_threads[ global_tid ];
    kmp_team_t *team            = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif
}

/*!
@ingroup WORK_SHARING @param loc Source location @param global_tid Global thread id Mark the end of a statically scheduled loop. */ void __kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid ) { KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid)); #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_end)) { ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL); ompt_task_info_t *task_info = __ompt_get_taskinfo(0); ompt_callbacks.ompt_callback(ompt_event_loop_end)( team_info->parallel_id, task_info->task_id); } #endif if ( __kmp_env_consistency_check ) __kmp_pop_workshare( global_tid, ct_pdo, loc ); } /* * User routines which take C-style arguments (call by value) * different from the Fortran equivalent routines */ void ompc_set_num_threads( int arg ) { // !!!!! TODO: check the per-task binding __kmp_set_num_threads( arg, __kmp_entry_gtid() ); } void ompc_set_dynamic( int flag ) { kmp_info_t *thread; /* For the thread-private implementation of the internal controls */ thread = __kmp_entry_thread(); __kmp_save_internal_controls( thread ); set__dynamic( thread, flag ? TRUE : FALSE ); } void ompc_set_nested( int flag ) { kmp_info_t *thread; /* For the thread-private internal controls implementation */ thread = __kmp_entry_thread(); __kmp_save_internal_controls( thread ); set__nested( thread, flag ? TRUE : FALSE ); } void ompc_set_max_active_levels( int max_active_levels ) { /* TO DO */ /* we want per-task implementation of this internal control */ /* For the per-thread internal controls implementation */ __kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels ); } void ompc_set_schedule( omp_sched_t kind, int modifier ) { // !!!!! 
TODO: check the per-task binding __kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier ); } int ompc_get_ancestor_thread_num( int level ) { return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level ); } int ompc_get_team_size( int level ) { return __kmp_get_team_size( __kmp_entry_gtid(), level ); } void kmpc_set_stacksize( int arg ) { // __kmp_aux_set_stacksize initializes the library if needed __kmp_aux_set_stacksize( arg ); } void kmpc_set_stacksize_s( size_t arg ) { // __kmp_aux_set_stacksize initializes the library if needed __kmp_aux_set_stacksize( arg ); } void kmpc_set_blocktime( int arg ) { int gtid, tid; kmp_info_t *thread; gtid = __kmp_entry_gtid(); tid = __kmp_tid_from_gtid(gtid); thread = __kmp_thread_from_gtid(gtid); __kmp_aux_set_blocktime( arg, thread, tid ); } void kmpc_set_library( int arg ) { // __kmp_user_set_library initializes the library if needed __kmp_user_set_library( (enum library_type)arg ); } void kmpc_set_defaults( char const * str ) { // __kmp_aux_set_defaults initializes the library if needed __kmp_aux_set_defaults( str, KMP_STRLEN( str ) ); } void kmpc_set_disp_num_buffers( int arg ) { // ignore after initialization because some teams have already // allocated dispatch buffers if( __kmp_init_serial == 0 && arg > 0 ) __kmp_dispatch_num_buffers = arg; } int kmpc_set_affinity_mask_proc( int proc, void **mask ) { #if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED return -1; #else if ( ! TCR_4(__kmp_init_middle) ) { __kmp_middle_initialize(); } return __kmp_aux_set_affinity_mask_proc( proc, mask ); #endif } int kmpc_unset_affinity_mask_proc( int proc, void **mask ) { #if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED return -1; #else if ( ! TCR_4(__kmp_init_middle) ) { __kmp_middle_initialize(); } return __kmp_aux_unset_affinity_mask_proc( proc, mask ); #endif } int kmpc_get_affinity_mask_proc( int proc, void **mask ) { #if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED return -1; #else if ( ! 
TCR_4(__kmp_init_middle) ) { __kmp_middle_initialize(); } return __kmp_aux_get_affinity_mask_proc( proc, mask ); #endif } /* -------------------------------------------------------------------------- */ /*! @ingroup THREADPRIVATE @param loc source location information @param gtid global thread number @param cpy_size size of the cpy_data buffer @param cpy_data pointer to data to be copied @param cpy_func helper function to call for copying data @param didit flag variable: 1=single thread; 0=not single thread __kmpc_copyprivate implements the interface for the private data broadcast needed for the copyprivate clause associated with a single region in an OpenMP<sup>*</sup> program (both C and Fortran). All threads participating in the parallel region call this routine. One of the threads (called the single thread) should have the <tt>didit</tt> variable set to 1 and all other threads should have that variable set to 0. All threads pass a pointer to a data buffer (cpy_data) that they have built. The OpenMP specification forbids the use of nowait on the single region when a copyprivate clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid race conditions, so the code generation for the single region should avoid generating a barrier after the call to @ref __kmpc_copyprivate. The <tt>gtid</tt> parameter is the global thread id for the current thread. The <tt>loc</tt> parameter is a pointer to source location information. Internal implementation: The single thread will first copy its descriptor address (cpy_data) to a team-private location, then the other threads will each call the function pointed to by the parameter cpy_func, which carries out the copy by copying the data using the cpy_data buffer. The cpy_func routine used for the copy and the contents of the data area defined by cpy_data and cpy_size may be built in any fashion that will allow the copy to be done. 
For instance, the cpy_data buffer can hold the actual data to be copied or it may hold a list of pointers to the data. The cpy_func routine must interpret the cpy_data buffer appropriately. The interface to cpy_func is as follows: @code void cpy_func( void *destination, void *source ) @endcode where void *destination is the cpy_data pointer for the thread being copied to and void *source is the cpy_data pointer for the thread being copied from. */ void __kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit ) { void **data_ptr; KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid )); KMP_MB(); data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data; if ( __kmp_env_consistency_check ) { if ( loc == 0 ) { KMP_WARNING( ConstructIdentInvalid ); } } /* ToDo: Optimize the following two barriers into some kind of split barrier */ if (didit) *data_ptr = cpy_data; /* This barrier is not a barrier region boundary */ #if USE_ITT_NOTIFY __kmp_threads[gtid]->th.th_ident = loc; #endif __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL ); if (! didit) (*cpy_func)( cpy_data, *data_ptr ); /* Consider next barrier the user-visible barrier for barrier region boundaries */ /* Nesting checks are already handled by the single construct checks */ #if USE_ITT_NOTIFY __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. 
tasks can overwrite the location) #endif __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL ); } /* -------------------------------------------------------------------------- */ #define INIT_LOCK __kmp_init_user_lock_with_checks #define INIT_NESTED_LOCK __kmp_init_nested_user_lock_with_checks #define ACQUIRE_LOCK __kmp_acquire_user_lock_with_checks #define ACQUIRE_LOCK_TIMED __kmp_acquire_user_lock_with_checks_timed #define ACQUIRE_NESTED_LOCK __kmp_acquire_nested_user_lock_with_checks #define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed #define RELEASE_LOCK __kmp_release_user_lock_with_checks #define RELEASE_NESTED_LOCK __kmp_release_nested_user_lock_with_checks #define TEST_LOCK __kmp_test_user_lock_with_checks #define TEST_NESTED_LOCK __kmp_test_nested_user_lock_with_checks #define DESTROY_LOCK __kmp_destroy_user_lock_with_checks #define DESTROY_NESTED_LOCK __kmp_destroy_nested_user_lock_with_checks /* * TODO: Make check abort messages use location info & pass it * into with_checks routines */ #if KMP_USE_DYNAMIC_LOCK // internal lock initializer static __forceinline void __kmp_init_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq) { if (KMP_IS_D_LOCK(seq)) { KMP_INIT_D_LOCK(lock, seq); #if USE_ITT_BUILD __kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL); #endif } else { KMP_INIT_I_LOCK(lock, seq); #if USE_ITT_BUILD kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock); __kmp_itt_lock_creating(ilk->lock, loc); #endif } } // internal nest lock initializer static __forceinline void __kmp_init_nest_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq) { #if KMP_USE_TSX // Don't have nested lock implementation for speculative locks if (seq == lockseq_hle || seq == lockseq_rtm || seq == lockseq_adaptive) seq = __kmp_user_lock_seq; #endif switch (seq) { case lockseq_tas: seq = lockseq_nested_tas; break; #if KMP_USE_FUTEX case lockseq_futex: seq = lockseq_nested_futex; break; #endif case lockseq_ticket: 
seq = lockseq_nested_ticket; break; case lockseq_queuing: seq = lockseq_nested_queuing; break; case lockseq_drdpa: seq = lockseq_nested_drdpa; break; default: seq = lockseq_nested_queuing; } KMP_INIT_I_LOCK(lock, seq); #if USE_ITT_BUILD kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock); __kmp_itt_lock_creating(ilk->lock, loc); #endif } /* initialize the lock with a hint */ void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint) { KMP_DEBUG_ASSERT(__kmp_init_serial); if (__kmp_env_consistency_check && user_lock == NULL) { KMP_FATAL(LockIsUninitialized, "omp_init_lock_with_hint"); } __kmp_init_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint)); } /* initialize the lock with a hint */ void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint) { KMP_DEBUG_ASSERT(__kmp_init_serial); if (__kmp_env_consistency_check && user_lock == NULL) { KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock_with_hint"); } __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint)); } #endif // KMP_USE_DYNAMIC_LOCK /* initialize the lock */ void __kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { #if KMP_USE_DYNAMIC_LOCK KMP_DEBUG_ASSERT(__kmp_init_serial); if (__kmp_env_consistency_check && user_lock == NULL) { KMP_FATAL(LockIsUninitialized, "omp_init_lock"); } __kmp_init_lock_with_hint(loc, user_lock, __kmp_user_lock_seq); #else // KMP_USE_DYNAMIC_LOCK static char const * const func = "omp_init_lock"; kmp_user_lock_p lck; KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( __kmp_env_consistency_check ) { if ( user_lock == NULL ) { KMP_FATAL( LockIsUninitialized, func ); } } KMP_CHECK_USER_LOCK_INIT(); if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { lck = 
(kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_user_lock_allocate( user_lock, gtid, 0 ); } INIT_LOCK( lck ); __kmp_set_user_lock_location( lck, loc ); #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_init_lock)) { ompt_callbacks.ompt_callback(ompt_event_init_lock)((uint64_t) lck); } #endif #if USE_ITT_BUILD __kmp_itt_lock_creating( lck ); #endif /* USE_ITT_BUILD */ #endif // KMP_USE_DYNAMIC_LOCK } // __kmpc_init_lock /* initialize the lock */ void __kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { #if KMP_USE_DYNAMIC_LOCK KMP_DEBUG_ASSERT(__kmp_init_serial); if (__kmp_env_consistency_check && user_lock == NULL) { KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock"); } __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_user_lock_seq); #else // KMP_USE_DYNAMIC_LOCK static char const * const func = "omp_init_nest_lock"; kmp_user_lock_p lck; KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( __kmp_env_consistency_check ) { if ( user_lock == NULL ) { KMP_FATAL( LockIsUninitialized, func ); } } KMP_CHECK_USER_LOCK_INIT(); if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #if KMP_USE_FUTEX else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { lck = (kmp_user_lock_p)user_lock; } #endif else { lck = __kmp_user_lock_allocate( user_lock, gtid, 0 ); } INIT_NESTED_LOCK( lck ); __kmp_set_user_lock_location( lck, loc ); #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)) { ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)((uint64_t) lck); } #endif #if USE_ITT_BUILD __kmp_itt_lock_creating( lck ); #endif /* USE_ITT_BUILD */ #endif // KMP_USE_DYNAMIC_LOCK } // __kmpc_init_nest_lock void __kmpc_destroy_lock( ident_t * loc, kmp_int32 
gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
    kmp_user_lock_p lck;
    // Indirect locks (tag 0) store a pointer to the real lock object in the lock table;
    // direct locks embed the lock in the user's variable itself.
    if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
        lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
    } else {
        lck = (kmp_user_lock_p)user_lock;
    }
    __kmp_itt_lock_destroyed(lck);
# endif
    KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else
    kmp_user_lock_p lck;

    // Legacy (non-dynamic) lock path: small TAS/futex locks live directly in the
    // user's omp_lock_t storage; anything larger goes through the lock table.
    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );
    }
#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_destroy_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_destroy_lock)((uint64_t) lck);
    }
#endif
#if USE_ITT_BUILD
    __kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
    DESTROY_LOCK( lck );

    // Only table-allocated locks own heap storage that must be returned;
    // in-place TAS/futex locks need no free.
    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        ;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        ;
    }
#endif
    else {
        __kmp_user_lock_free( user_lock, gtid, lck );
    }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_lock

/* destroy the lock */
// Destroy a nestable user lock (omp_destroy_nest_lock). Same structure as
// __kmpc_destroy_lock, but the in-place size check also accounts for the
// nesting-depth counter stored next to the poll word.
void
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
    __kmp_itt_lock_destroyed(ilk->lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    // NOTE: sizeof(lck->...) is evaluated on the (uninitialized) pointer's static
    // type only — lck is not dereferenced here.
    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );
    }
#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)((uint64_t) lck);
    }
#endif
#if USE_ITT_BUILD
    __kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
    DESTROY_NESTED_LOCK( lck );

    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        ;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        ;
    }
#endif
    else {
        __kmp_user_lock_free( user_lock, gtid, lck );
    }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_nest_lock

// Acquire a user lock (omp_set_lock): blocks until the lock is obtained.
void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    KMP_COUNT_BLOCK(OMP_set_lock);
#if KMP_USE_DYNAMIC_LOCK
    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
# endif
    // Fast inlined paths for the common TAS/futex direct locks; consistency
    // checking forces the out-of-line dispatch so errors can be reported.
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
    } else
# endif
    {
        __kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
# if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    ACQUIRE_LOCK( lck, gtid );

#if USE_ITT_BUILD
    __kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_acquired_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_acquired_lock)((uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

// Acquire a nestable user lock (omp_set_nest_lock); re-acquisition by the
// owning thread increments the nesting depth instead of deadlocking.
void
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif
#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled) {
        // missing support here: need to know whether acquired first or not
    }
#endif
#else // KMP_USE_DYNAMIC_LOCK
    int acquire_status;
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    ACQUIRE_NESTED_LOCK( lck, gtid, &acquire_status );

#if USE_ITT_BUILD
    __kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled) {
        // Distinguish first acquisition from a nested re-acquisition for the tool.
        if (acquire_status == KMP_LOCK_ACQUIRED_FIRST) {
           if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first))
              ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first)((uint64_t) lck);
        } else {
           if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next))
              ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next)((uint64_t) lck);
        }
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

// Release a user lock (omp_unset_lock).
void
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_RELEASE_TAS_LOCK(user_lock, gtid);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
    } else
# endif
    {
        __kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    /* Can't use serial interval since not block structured */
    /* release the lock */
    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // "fast" path implemented to fix customer performance issue
#if USE_ITT_BUILD
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
        // Release in place: clear the poll word, then full memory barrier.
        TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
        KMP_MB();
        return;
#else
        lck = (kmp_user_lock_p)user_lock;
#endif
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */

    RELEASE_LOCK( lck, gtid );

#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_release_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* release the lock */
// Release a nestable user lock (omp_unset_nest_lock); the lock is only truly
// released when the nesting depth drops to zero.
void
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    /* Can't use serial interval since not block structured */
    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // "fast" path implemented to fix customer performance issue
        kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock;
#if USE_ITT_BUILD
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
        // Decrement nesting depth; clear the poll word only on the outermost unset.
        if ( --(tl->lk.depth_locked) == 0 ) {
            TCW_4(tl->lk.poll, 0);
        }
        KMP_MB();
        return;
#else
        lck = (kmp_user_lock_p)user_lock;
#endif
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */

    int release_status;
    release_status = RELEASE_NESTED_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled) {
        if (release_status == KMP_LOCK_RELEASED) {
            if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) {
                ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)( (uint64_t) lck);
            }
        } else if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)) {
            ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)( (uint64_t) lck);
        }
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the lock */
// Non-blocking lock attempt (omp_test_lock). Returns FTN_TRUE if the lock was
// acquired, FTN_FALSE otherwise.
int
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
    KMP_COUNT_BLOCK(OMP_test_lock);

#if KMP_USE_DYNAMIC_LOCK
    int rc;
    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
    } else
# endif
    {
        rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
    if (rc) {
# if USE_ITT_BUILD
        __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
        return FTN_TRUE;
    } else {
# if USE_ITT_BUILD
        __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
# endif
        return FTN_FALSE;
    }
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;
    int          rc;

    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    rc = TEST_LOCK( lck, gtid );
#if USE_ITT_BUILD
    if ( rc ) {
        __kmp_itt_lock_acquired( lck );
    } else {
        __kmp_itt_lock_cancelled( lck );
    }
#endif /* USE_ITT_BUILD */
    return ( rc ? FTN_TRUE : FTN_FALSE );

    /* Can't use serial interval since not block structured */
#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the lock */
// Non-blocking attempt on a nestable lock (omp_test_nest_lock). Returns the
// nesting depth on success, 0 on failure (per the OpenMP API).
int
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
    int rc;
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
    rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
    if (rc) {
        __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
    } else {
        __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
    }
# endif
    return rc;
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;
    int          rc;

    if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_USE_FUTEX
    else if ( ( __kmp_user_lock_kind == lk_futex ) && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    rc = TEST_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
    if ( rc ) {
        __kmp_itt_lock_acquired( lck );
    } else {
        __kmp_itt_lock_cancelled( lck );
    }
#endif /* USE_ITT_BUILD */
    return rc;

    /* Can't use serial interval since not block structured */
#endif // KMP_USE_DYNAMIC_LOCK
}

/*--------------------------------------------------------------------------------------------------------------------*/

/*
 * Interface to fast scalable reduce methods routines
 */

// keep the selected method in a thread local structure for cross-function usage: will be used in __kmpc_end_reduce* functions;
// another solution: to re-determine the method one more time in __kmpc_end_reduce* functions (new prototype required then)
// AT: which solution is better?
// Accessors for the per-thread cached reduction method: the method chosen in
// __kmpc_reduce* must be visible to the matching __kmpc_end_reduce* call.
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
        ( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )

#define __KMP_GET_REDUCTION_METHOD(gtid) \
        ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )

// description of the packed_reduction_method variable: look at the macros in kmp.h

// used in a critical section reduce block
// Acquire the critical-section lock guarding a critical_reduce_block;
// paired with __kmp_end_critical_section_reduce_block below.
static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    // this lock was visible to a customer and to the threading profile tool as a serial overhead span
    //            (although it's used for an internal purpose only)
    //            why was it visible in previous implementation?
    //            should we keep it visible in new reduce block?
    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

    kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
    // Check if it is initialized.
    // A zero word means the critical name has never been used as a lock yet;
    // initialize it lazily (direct tag via CAS, or an indirect lock pointer).
    if (*lk == 0) {
        if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
        } else {
            __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(__kmp_user_lock_seq));
        }
    }
    // Branch for accessing the actual lock object and set operation. This branching is inevitable since
    // this lock initialization does not follow the normal dispatch path (lock table is not used).
    if (KMP_EXTRACT_D_TAG(lk) != 0) {
        lck = (kmp_user_lock_p)lk;
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
        lck = ilk->lock;
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers
    // with 32 byte critical sections. If there isn't enough space, then we
    // have to use a pointer.
    if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
        lck = (kmp_user_lock_p)crit;
    }
    else {
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }
    KMP_DEBUG_ASSERT( lck != NULL );

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    __kmp_acquire_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK
}

// used in a critical section reduce block
// Release the critical-section lock taken by
// __kmp_enter_critical_section_reduce_block; the lock is assumed initialized.
static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers with 32 byte critical
    // sections. If there isn't enough space, then we have to use a pointer.
    if ( __kmp_base_user_lock_size > 32 ) {
        lck = *( (kmp_user_lock_p *) crit );
        KMP_ASSERT( lck != NULL );
    }
    else {
        lck = (kmp_user_lock_p) crit;
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

    __kmp_release_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmp_end_critical_section_reduce_block


/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck ) {

    KMP_COUNT_BLOCK(REDUCE_nowait);
    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
    kmp_team_t *team;
    kmp_info_t *th;
    int teams_swapped = 0, task_state;
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be used as a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???

    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

#if OMP_40_ENABLED
    th = __kmp_thread_from_gtid(global_tid);
    if( th->th.th_teams_microtask ) {   // AC: check if we are inside the teams construct?
        team = th->th.th_team;
        if( team->t.t_level == th->th.th_teams_level ) {
            // this is reduction at teams construct
            KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid);  // AC: check that tid == 0
            // Let's swap teams temporarily for the reduction barrier
            // (restored at the end of this function when teams_swapped is set)
            teams_swapped = 1;
            th->th.th_info.ds.ds_tid = team->t.t_master_tid;
            th->th.th_team = team->t.t_parent;
            th->th.th_team_nproc = th->th.th_team->t.t_nproc;
            th->th.th_task_team = th->th.th_team->t.t_task_team[0];
            task_state = th->th.th_task_state;
            th->th.th_task_state = 0;
        }
    }
#endif // OMP_40_ENABLED

    // packed_reduction_method value will be reused by __kmp_end_reduce* function, the value should be kept in a variable
    // the variable should be either a construct-specific or thread-specific property, not a team specific property
    //     (a thread can reach the next reduce block on the next construct, reduce method may differ on the next construct)
    // an ident_t "loc" parameter could be used as a construct-specific property (what if loc == 0?)
    //     (if both construct-specific and team-specific variables were shared, then unness extra syncs should be needed)
    // a thread-specific variable is better regarding two issues above (next construct and extra syncs)
    // a thread-specific "th_local.reduction_method" variable is used currently
    // each thread executes 'determine' and 'set' lines (no need to execute by one thread, to avoid unness extra syncs)

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

        // all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen)
        //     (it's not quite good, because the checking block has been closed by this 'pop',
        //      but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction)
        if ( __kmp_env_consistency_check )
            __kmp_pop_sync( global_tid, ct_reduce, loc );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //AT: performance issue: a real barrier here
        //AT:     (if master goes slow, other threads are blocked here waiting for the master to come and release them)
        //AT:     (it's not what a customer might expect specifying NOWAIT clause)
        //AT:     (specifying NOWAIT won't result in improvement of performance, it'll be confusing to a customer)
        //AT: another implementation of *barrier_gather*nowait() (or some other design) might go faster
        //        and be more in line with sense of NOWAIT
        //AT: TO DO: do epcc test and compare times

        // this barrier should be invisible to a customer and to the threading profile tool
        //              (it's neither a terminating barrier nor customer's code, it's used for an internal purpose)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
        // __kmp_barrier returns non-zero only for the master; map to 1/0 per API.
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        //     ( none of other workers will get to __kmpc_end_reduce_nowait() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) {
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }
#if OMP_40_ENABLED
    if( teams_swapped ) {
        // Restore thread structure
        th->th.th_info.ds.ds_tid = 0;
        th->th.th_team = team;
        th->th.th_team_nproc = team->t.t_nproc;
        th->th.th_task_team = team->t.t_task_team[task_state];
        th->th.th_task_state = task_state;
    }
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a reduce nowait.
*/
void
__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {

    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // Retrieve the method chosen by the matching __kmpc_reduce_nowait() call.
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( on Intel platforms only )

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // neither master nor other workers should get here
        //     (code gen does not generate this call in case 2: atomic reduce block)
        // actually it's better to remove this elseif at all;
        // after removal this value will checked by the 'else' and will assert

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master gets here

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

/* 2.a.ii. Reduce Block with a terminating barrier */

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

A blocking reduce that includes an implicit barrier.
*/
kmp_int32
__kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
    void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck )
{
    KMP_COUNT_BLOCK(REDUCE_wait);
    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???

    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //case tree_reduce_block:
        // this barrier should be visible to a customer and to the threading profile tool
        //              (it's a terminating barrier on constructs if NOWAIT not specified)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        //     ( none of other workers except master will enter __kmpc_end_reduce() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) { // 0: all other workers; 1: master
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {

    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );

    // Retrieve the method chosen by the matching __kmpc_reduce() call.
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    // this barrier should be visible to a customer and to the threading profile tool
    //              (it's a terminating barrier on constructs if NOWAIT not specified)

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master executes here (master releases all other workers)
        __kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD

/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/

// Return the task id of the current task, or 0 when called from an
// uninitialized/non-OpenMP thread (negative gtid).
kmp_uint64
__kmpc_get_taskid() {

    kmp_int32    gtid;
    kmp_info_t * thread;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread = __kmp_thread_from_gtid( gtid );
    return thread->th.th_current_task->td_task_id;

} // __kmpc_get_taskid

// Return the task id of the current task's parent, or 0 when there is no
// parent task or the calling thread is not an OpenMP thread.
kmp_uint64
__kmpc_get_parent_taskid() {

    kmp_int32        gtid;
    kmp_info_t *     thread;
    kmp_taskdata_t * parent_task;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread      = __kmp_thread_from_gtid( gtid );
    parent_task = thread->th.th_current_task->td_parent;
    return ( parent_task == NULL ? 0 : parent_task->td_task_id );

} // __kmpc_get_parent_taskid

// Record the requested thread placement (sockets/cores/threads-per-core and
// offsets) in the runtime globals; triggers serial initialization if needed.
void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT)
{
    if ( ! __kmp_init_serial ) {
        __kmp_serial_initialize();
    }
    __kmp_place_num_sockets = nS;
    __kmp_place_socket_offset = sO;
    __kmp_place_num_cores = nC;
    __kmp_place_core_offset = cO;
    __kmp_place_num_threads_per_core = nT;
}

#if OMP_45_ENABLED
/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.
@param num_dims  number of associated doacross loops.
@param dims  info on loops bounds.

Initialize doacross loop information.
Expect compiler send us inclusive bounds,
e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2.
*/
void
__kmpc_doacross_init(ident_t *loc, int gtid, int num_dims, struct kmp_dim * dims)
{
    int j, idx;
    kmp_int64 last, trace_count;
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_uint32 *flags;
    kmp_disp_t *pr_buf = th->th.th_dispatch;
    dispatch_shared_info_t *sh_buf;

    KA_TRACE(20,("__kmpc_doacross_init() enter: called T#%d, num dims %d, active %d\n",
                 gtid, num_dims, !team->t.t_serialized));
    KMP_DEBUG_ASSERT(dims != NULL);
    KMP_DEBUG_ASSERT(num_dims > 0);

    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_init() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }
    KMP_DEBUG_ASSERT(team->t.t_nproc > 1);
    idx = pr_buf->th_doacross_buf_idx++;  // Increment index of shared buffer for the next loop
    sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];

    // Save bounds info into allocated private buffer
    // Private layout: [0]=num_dims, [1]=&num_done, then per dim:
    // dim 0 stores (lo, up, st); dims 1.. store (range_length, lo, up, st).
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info == NULL);
    pr_buf->th_doacross_info =
        (kmp_int64*)__kmp_thread_malloc(th, sizeof(kmp_int64)*(4 * num_dims + 1));
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    pr_buf->th_doacross_info[0] = (kmp_int64)num_dims; // first element is number of dimensions
    // Save also address of num_done in order to access it later without knowing the buffer index
    pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done;
    pr_buf->th_doacross_info[2] = dims[0].lo;
    pr_buf->th_doacross_info[3] = dims[0].up;
    pr_buf->th_doacross_info[4] = dims[0].st;
    last = 5;
    for( j = 1; j < num_dims; ++j ) {
        kmp_int64 range_length; // To keep ranges of all dimensions but the first dims[0]
        if( dims[j].st == 1 ) { // most common case
            // AC: should we care of ranges bigger than LLONG_MAX? (not for now)
            range_length = dims[j].up - dims[j].lo + 1;
        } else {
            if( dims[j].st > 0 ) {
                KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo);
                range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1;
            } else {            // negative increment
                KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up);
                range_length = (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1;
            }
        }
        pr_buf->th_doacross_info[last++] = range_length;
        pr_buf->th_doacross_info[last++] = dims[j].lo;
        pr_buf->th_doacross_info[last++] = dims[j].up;
        pr_buf->th_doacross_info[last++] = dims[j].st;
    }

    // Compute total trip count.
    // Start with range of dims[0] which we don't need to keep in the buffer.
    if( dims[0].st == 1 ) { // most common case
        trace_count = dims[0].up - dims[0].lo + 1;
    } else if( dims[0].st > 0 ) {
        KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo);
        trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1;
    } else {   // negative increment
        KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up);
        trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1;
    }
    for( j = 1; j < num_dims; ++j ) {
        trace_count *= pr_buf->th_doacross_info[4 * j + 1]; // use kept ranges
    }
    KMP_DEBUG_ASSERT(trace_count > 0);

    // Check if shared buffer is not occupied by other loop (idx - __kmp_dispatch_num_buffers)
    if( idx != sh_buf->doacross_buf_idx ) {
        // Shared buffer is occupied, wait for it to be free
        __kmp_wait_yield_4( (kmp_uint32*)&sh_buf->doacross_buf_idx, idx, __kmp_eq_4, NULL );
    }
    // Check if we are the first thread. After the CAS the first thread gets 0,
    // others get 1 if initialization is in progress, allocated pointer otherwise.
    flags = (kmp_uint32*)KMP_COMPARE_AND_STORE_RET64(
        (kmp_int64*)&sh_buf->doacross_flags,NULL,(kmp_int64)1);
    if( flags == NULL ) {
        // we are the first thread, allocate the array of flags
        kmp_int64 size = trace_count / 8 + 8; // in bytes, use single bit per iteration
        sh_buf->doacross_flags = (kmp_uint32*)__kmp_thread_calloc(th, size, 1);
    } else if( (kmp_int64)flags == 1 ) {
        // initialization is still in progress, need to wait
        // (the sentinel value 1 marks "allocation in flight"; spin until the
        // allocating thread publishes the real pointer)
        while( (volatile kmp_int64)sh_buf->doacross_flags == 1 ) {
            KMP_YIELD(TRUE);
        }
    }
    KMP_DEBUG_ASSERT((kmp_int64)sh_buf->doacross_flags > 1); // check value of pointer
    pr_buf->th_doacross_flags = sh_buf->doacross_flags;      // save private copy in order to not
                                                             // touch shared buffer on each iteration
    KA_TRACE(20,("__kmpc_doacross_init() exit: T#%d\n", gtid));
}

// Block until the doacross iteration identified by vec (one coordinate per
// dimension) has been posted by __kmpc_doacross_post. Out-of-bounds
// coordinates are treated as "no dependence" and return immediately.
void
__kmpc_doacross_wait(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, up, st;

    KA_TRACE(20,("__kmpc_doacross_wait() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_wait() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }

    // calculate sequential iteration number and check out-of-bounds condition
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    up = pr_buf->th_doacross_info[3];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else { // negative increment
        if( vec[0] > lo || vec[0] < up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    // Fold the remaining dimensions into a single linear iteration number
    // using the per-dimension range lengths saved by __kmpc_doacross_init.
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        up = pr_buf->th_doacross_info[j + 3];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else { // st < 0
            if( vec[i] > lo || vec[i] < up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    // Spin on the single bit assigned to this iteration in the shared flag array.
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5;       // divided by 32
    flag = 1 << shft;
    while( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 ) {
        KMP_YIELD(TRUE);
    }
    KA_TRACE(20,("__kmpc_doacross_wait() exit: T#%d wait for iter %lld completed\n",
                 gtid, (iter_number<<5)+shft));
}

// Mark the doacross iteration identified by vec as completed so that waiters
// in __kmpc_doacross_wait can proceed. (Continues past the end of this chunk.)
void
__kmpc_doacross_post(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, st;

    KA_TRACE(20,("__kmpc_doacross_post() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_post() exit: serialized team\n"));
        return; // no dependencies if team is
serialized } // calculate sequential iteration number (same as in "wait" but no out-of-bounds checks) pr_buf = th->th.th_dispatch; KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL); num_dims = pr_buf->th_doacross_info[0]; lo = pr_buf->th_doacross_info[2]; st = pr_buf->th_doacross_info[4]; if( st == 1 ) { // most common case iter_number = vec[0] - lo; } else if( st > 0 ) { iter_number = (kmp_uint64)(vec[0] - lo) / st; } else { // negative increment iter_number = (kmp_uint64)(lo - vec[0]) / (-st); } for( i = 1; i < num_dims; ++i ) { kmp_int64 iter, ln; kmp_int32 j = i * 4; ln = pr_buf->th_doacross_info[j + 1]; lo = pr_buf->th_doacross_info[j + 2]; st = pr_buf->th_doacross_info[j + 4]; if( st == 1 ) { iter = vec[i] - lo; } else if( st > 0 ) { iter = (kmp_uint64)(vec[i] - lo) / st; } else { // st < 0 iter = (kmp_uint64)(lo - vec[i]) / (-st); } iter_number = iter + ln * iter_number; } shft = iter_number % 32; // use 32-bit granularity iter_number >>= 5; // divided by 32 flag = 1 << shft; if( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 ) KMP_TEST_THEN_OR32( (kmp_int32*)&pr_buf->th_doacross_flags[iter_number], (kmp_int32)flag ); KA_TRACE(20,("__kmpc_doacross_post() exit: T#%d iter %lld posted\n", gtid, (iter_number<<5)+shft)); } void __kmpc_doacross_fini(ident_t *loc, int gtid) { kmp_int64 num_done; kmp_info_t *th = __kmp_threads[gtid]; kmp_team_t *team = th->th.th_team; kmp_disp_t *pr_buf = th->th.th_dispatch; KA_TRACE(20,("__kmpc_doacross_fini() enter: called T#%d\n", gtid)); if( team->t.t_serialized ) { KA_TRACE(20,("__kmpc_doacross_fini() exit: serialized team %p\n", team)); return; // nothing to do } num_done = KMP_TEST_THEN_INC64((kmp_int64*)pr_buf->th_doacross_info[1]) + 1; if( num_done == th->th.th_team_nproc ) { // we are the last thread, need to free shared resources int idx = pr_buf->th_doacross_buf_idx - 1; dispatch_shared_info_t *sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers]; KMP_DEBUG_ASSERT(pr_buf->th_doacross_info[1] == 
(kmp_int64)&sh_buf->doacross_num_done); KMP_DEBUG_ASSERT(num_done == (kmp_int64)sh_buf->doacross_num_done); KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx); __kmp_thread_free(th, (void*)sh_buf->doacross_flags); sh_buf->doacross_flags = NULL; sh_buf->doacross_num_done = 0; sh_buf->doacross_buf_idx += __kmp_dispatch_num_buffers; // free buffer for future re-use } // free private resources (need to keep buffer index forever) __kmp_thread_free(th, (void*)pr_buf->th_doacross_info); pr_buf->th_doacross_info = NULL; KA_TRACE(20,("__kmpc_doacross_fini() exit: T#%d\n", gtid)); } #endif // end of file //
spot.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "pix.h" /*------------------------------------------------------------------------- * * Used to sort spots in descent of frame IDs. * *------------------------------------------------------------------------*/ int spot_cmp(const void *a, const void *b) { sp_t *aa = *((sp_t **)a); sp_t *bb = *((sp_t **)b); if (aa != NULL && bb != NULL) { if (aa->fID > bb->fID) return -1; else if (aa->fID < bb->fID) return 1; else return 0; } else { if (aa == NULL && bb == NULL) return 0; else if (aa == NULL) return 1; else return -1; } } /*------------------------------------------------------------------------- * * Spot finding: from a set of successive frames. * *------------------------------------------------------------------------*/ void spot_dframe(para_t *p) { frameIO_t *fio; frameloc_t *fm0=NULL, *fm1=NULL; int fID, fID0, fIDN, twoperc, n; fID0 = p->frameID1; fIDN = p->frameID2; fio = frameIO_Open(p); twoperc = (fIDN-fID0+1) / 50; printf("Frames for analysis: (%d,%d)\n", fID0, fIDN); fm1 = frameIO(p, fio, fIDN); // load the *next* frame n = 0; for (fID=fIDN-1; fID >= fID0; fID--) { fm0 = frameIO(p, fio, fID); // load the *this* frame if (p->alg == 0) frameSpots(p, fm0, fm1); else frameSpot2(p, fm0, fm1); frameDelete(fm1); frame_sum(p, fm0); fm1 = fm0; n++; if (twoperc > 0 && n % twoperc == 0) { fprintf(stderr, "."); fflush(stderr); } } fprintf(stderr, "\n"); frameIO_Close(p, fio); frameDelete(fm1); } /*------------------------------------------------------------------------- * * Spot finding: from a single frame. 
* *------------------------------------------------------------------------*/ void spot_sframe(para_t *p) { frameIO_t *fio; frameloc_t *fm=NULL; fio = frameIO_Open(p); fm = frameIO(p, fio, p->frameID2); if (p->alg == 0) frameSpots(p, fm, NULL); else frameSpot2(p, fm, NULL); frame_sum(p, fm); frameDelete(fm); } /*------------------------------------------------------------------------- * * Spot handling: fitting for all the candidate spots. * *------------------------------------------------------------------------*/ void spot_fitting(para_t *p) { FILE *f; double *x_fit, *y_fit, dt; sp_t **sp; int i, x, y, r, n, twoperc; int sdim_x, sdim_y, x_rng, y_rng, imglen; // Prepare to show the progress of running. printf("Found candidate spots: %d\n", p->n_sp1); fflush(stdout); twoperc = p->n_sp1 / 50; // Prepare coordinate of pixels of the spot, which is relative to its center. sdim_x = p->x_find_pixels; sdim_y = p->y_find_pixels; imglen = sdim_x * sdim_y; x_rng = sdim_x / 2; y_rng = sdim_y / 2; x_fit = malloc(imglen * sizeof(double)); y_fit = malloc(imglen * sizeof(double)); if (!x_fit || !y_fit) pstop("!!! SpotFit: not enough memory.\n"); i = 0; for (y=-y_rng; y <= y_rng; y++) { for (x=-x_rng; x <= x_rng; x++) { x_fit[i] = x; y_fit[i] = y; i++; } } // Fitting for the normal spots. sp = p->sp1; n = 0; #pragma omp parallel for private(i,r) reduction(+:n) for (i=0; i < p->n_sp1; i++) { r = SpotFit(p, x_fit, y_fit, sp[i]); if (r == 0) n++; if (twoperc > 0 && i % twoperc == 0) { fprintf(stderr, "."); fflush(stderr); } } fprintf(stderr, "\n"); printf("Total valid particles: %d\n", n); // Fitting for the high-intensity spots. sp = p->sp2; n = 0; #pragma omp parallel for private(i,r) reduction(+:n) for (i=0; i < p->n_sp2; i++) { r = SpotFit(p, x_fit, y_fit, sp[i]); if (r == 0) n++; } printf("Total high-intensity particles: %d\n", n); // Output the results of normal spots, and update the statistics. 
f = out_fit_init(p, p->outfn); sp = p->sp1; for (i=0, n=0; i < p->n_sp1; i++) { if (sp[i] && sp[i]->res) { out_fit(p, f, n, sp[i]); x = sp[i]->fID - p->frameID1; p->fsts[x].n_event++; n++; } } dt = get_realtime(); fprintf(f, "ExecTime: %E sec\n", dt); printf("ExecTime: %E sec\n", dt); out_fit_close(f); // Output the results of high-intensity spots. f = out_fit_init(p, p->outfnH); sp = p->sp2; for (i=0, n=0; i < p->n_sp2; i++) { if (sp[i] && sp[i]->res) { out_fit(p, f, n, sp[i]); n++; } } out_fit_close(f); // Output the statistics and the sum of pixels. out_framests(p); out_framesum(p); } /*------------------------------------------------------------------------- * * Spot handling: output the images of all candidate spots. * *------------------------------------------------------------------------*/ void spot_output_img(para_t *p) { FILE *f; float *data; int i, j, n, imglen, *img; const char *fn; fn = "spot.img"; imglen = p->x_find_pixels * p->y_find_pixels; if ((f = fopen(fn, "wb")) == NULL) pstop("!!! spot_output_img: cannot open file: %s\n", fn); if ((data = malloc(imglen*sizeof(float))) == NULL) pstop("!!! spot_output_img: not enough memory.\n"); n = 0; for (i=0; i < p->n_sp1; i++) { if (p->sp1[i] == NULL || p->sp1[i]->img == NULL) continue; img = p->sp1[i]->img; for (j=0; j < imglen; j++) data[j] = (float)img[j]; fwrite(data, sizeof(float), imglen, f); n++; } printf("Number of found spots: %d\n", n); fclose(f); free(data); }
pw_threads.c
/*
 * Measures an OpenMP-parallel fill loop with the PAPI wrapper
 * instrumentation macros, then prints sample elements so the compiler
 * cannot eliminate the work.
 */
#include <papi_wrapper.h>
#include <stdio.h>
#include <stdlib.h>

#include "test_lib.h"

int main()
{
    int n = 1000;
    int values[n];

    /* Instrumented region: each element gets floor(i * 42.3). */
    pw_init_start_instruments;
#pragma omp parallel for
    for (int idx = 0; idx < n; ++idx) {
        values[idx] = idx * 42.3; /* double product truncated to int, as before */
    }
    pw_stop_instruments;
    pw_print_instruments;

    /* avoid code elimination: emit every 100th element */
    for (int idx = 0; idx < n; idx += 100) {
        printf("x[%d]\t%d\n", idx, values[idx]);
    }

    return pw_test_pass(__FILE__);
}
matrixutil.h
#pragma once #include <global.h> #include <memoryutil.h> #include <randomutil.h> ns_easyquantum template<typename Ty, typename uint, typename allocator> struct DenseVector; /* Class of dense matrix. */ template<typename Ty, typename uint = unsigned int, typename allocator = safe_allocator<Ty>> struct DenseMatrix { using size_type = uint; using uint_t = uint; size_type size = 0; Ty* data = nullptr; DenseMatrix() {} void initialize(size_type size_, allocator A = allocator()) { assert(size == 0 && data == nullptr, "Only empty matrix can be initialized."); size = size_; // data = new Ty[size * size]; data = A.allocate(size * size); for (size_type i = 0; i < size * size; ++i) data[i] = 0; } DenseMatrix(std::initializer_list<Ty> list) { size_type s = (size_type)list.size(); size_type size0 = intsqrt(s); assert(size0 * size0 == s, "Input size should be n*n."); initialize(size0); auto p = list.begin(); for (size_type i = 0; i < size0 * size0; ++i) { data[i] = *p; ++p; } } /* Create a size*size matrix, all values cleared to 0. */ DenseMatrix(size_type size_, allocator A = allocator()) { initialize(size_, A); } /* Copy constructor. Deep copy. */ DenseMatrix(const DenseMatrix<Ty, uint_t, allocator>& m) { size = m.size; // data = new Ty[m.size*m.size]; allocator A; data = A.allocate(m.size * m.size); for (size_type i = 0; i < size*size; ++i) data[i] = m.data[i]; } /* Get a reference to one element. You can modify this element. */ Ty& get(size_type x, size_type y) { return data[x * size + y]; } /* Same as get(x,y). */ Ty& operator()(size_type x, size_type y) { return get(x, y); } const Ty& at(size_type x, size_type y) const { return data[x * size + y]; } /* Matrix multiplication without optimization. 
*/ DenseMatrix<Ty, uint_t, allocator> operator*(const DenseMatrix<Ty, uint_t, allocator>& m) const { DenseMatrix<Ty, uint_t, allocator> newm(size); for (size_type i = 0; i < size; ++i) { for (size_type j = 0; j < size; ++j) { for (size_type k = 0; k < size; ++k) { newm(i, j) += (*this).at(i, k) * m.at(k, j); } } } return newm; } /* Stringify this matrix. */ std::string to_string() const { std::stringstream ss; for (size_type i = 0; i < size; ++i) { for (size_type j = 0; j < size; ++j) { ss << at(i, j) << " "; } ss << endl; } return ss.str(); } /* Write the matrix into a matlab readable file. */ void write_matlab_file(std::string filename) const { ofstream out(filename, ios::out); for (size_type i = 0; i < size; ++i) { for (size_type j = 0; j < size; ++j) { out << at(i, j) << " "; } out << endl; } } /* Destructor. Delete all allocated spaces. */ ~DenseMatrix() { delete[] data; } }; /* Class of dense vector. */ template<typename Ty, typename uint = unsigned int, typename allocator = safe_allocator<Ty>> struct DenseVector { using size_type = uint; size_type size = 0; Ty* data = nullptr; DenseVector() {} void initialize(size_type size_, allocator A = allocator()) { assert(size == 0 && data == nullptr, "Only empty matrix can be initialized."); size = size_; // data = new Ty[size]; data = A.allocate(size); for (size_type i = 0; i < size; ++i) data[i] = 0; } void clear() { for (size_type i = 0; i < size; ++i) data[i] = 0; } /* Create a size*size matrix, all values cleared to 0. */ DenseVector(size_type size_, allocator A = allocator()) { initialize(size_, A); } DenseVector(std::initializer_list<Ty> list) { initialize((size_type)list.size()); size_type i = 0; for (Ty it : list) { data[i] = it; ++i; } } /* Copy Constructor. Deep copy. */ DenseVector(const DenseVector<Ty, uint, allocator>& m) { size = m.size; // data = new Ty[m.size]; allocator A; data = A.allocate(m.size); for (size_type i = 0; i < size; ++i) data[i] = m.data[i]; } /* Assign operator. 
First free the target space, then deep copy. */ DenseVector<Ty, uint, allocator>& operator=(const DenseVector<Ty, uint, allocator>& m) { allocator A; A.deallocate(data, -1); size = m.size; data = A.allocate(m.size); for (size_type i = 0; i < size; ++i) data[i] = m.data[i]; return *this; } /* Vector addition operator. */ DenseVector<Ty, uint, allocator> operator+(const DenseVector<Ty, uint, allocator>& v) const { assert(v.size == size); DenseVector<Ty, uint, allocator> vout(*this); for (size_type i = 0; i < size; ++i) vout.data[i] += v.data[i]; return vout; } /* Vector subtraction operator. */ DenseVector<Ty, uint, allocator> operator-(const DenseVector<Ty, uint, allocator>& v) const { assert(v.size == size); DenseVector<Ty, uint, allocator> vout(*this); for (size_type i = 0; i < size; ++i) vout.data[i] -= v.data[i]; return vout; } /* Get a reference to one element. You can modify this element. */ Ty& get(size_type x) { assert(x >= 0 && x < size); return data[x]; } /* Same as get(x). */ Ty& operator()(size_type x) { return get(x); } /* Same as get(x). */ Ty& operator[](size_type x) { return get(x); } const Ty& at(size_type x) const { return data[x]; } /* Compute the l2 norm. */ Ty norm2() { Ty sum = 0; for (size_type i = 0; i < size; ++i) sum += (data[i] * data[i]); return sqrt(sum); } /* Compute the l-inf norm. */ Ty normInf() { Ty ninf = 0; for (size_type i = 0; i < size; ++i) { if (abs(data[i]) > ninf) { ninf = abs(data[i]); } } return ninf; } /* Stringify this vector. */ std::string to_string() { std::stringstream ss; for (size_type i = 0; i < size; ++i) { ss << data[i] << endl; } return ss.str(); } /* Write the vector into a matlab readable file. */ void write_matlab_file(std::string filename) { ofstream out(filename, ios::out); for (size_type i = 0; i < size - 1; ++i) { out << get(i) << ";"; } out << get(size - 1); } /* Compute the maximum value. Output via the argument, return the index. 
It will only return the first maximum (if there are more than 1 maximum). */ size_type compute_max(Ty& maxvalue) { size_type idx = 0; maxvalue = get(0); for (size_type i = 1; i < size; ++i) { if (get(i) > maxvalue) { maxvalue = get(i); idx = i; } } return idx; } /* Compute the maximum value of its absolute value. Output via the argument, return the index. It will only return the first maximum (if there are more than 1 maximum). */ size_type maxabs(Ty& maxvalue) { size_type idx = 0; maxvalue = abs(get(0)); for (size_type i = 1; i < size; ++i) { if (abs(get(i)) > maxvalue) { maxvalue = abs(get(i)); idx = i; } } return idx; } /* Destructor. Delete all allocated spaces. */ ~DenseVector() { delete[] data; } }; /* Matrix multipies vector. Return a new vector. */ template<typename Ty, typename uint_t, typename allocator_t> DenseVector<Ty, uint_t, allocator_t> operator*(const DenseMatrix<Ty, uint_t, allocator_t> &m, const DenseVector<Ty, uint_t, allocator_t> &v) { assert(m.size == v.size, "Bad size"); DenseVector<Ty, uint_t, allocator_t> v2(m.size); //#pragma omp parallel for (DenseVector<Ty, uint_t, allocator_t>::size_type i = 0; i < m.size; ++i) { for (DenseVector<Ty, uint_t, allocator_t>::size_type j = 0; j < m.size; ++j) { v2(i) += m.at(i, j) * v.at(j); } } return v2; } /* Matrix multipies constant. */ template<typename Ty, typename uint_t, typename allocator_t> DenseVector<Ty, uint_t, allocator_t> operator*( const DenseVector<Ty, uint_t, allocator_t> &v, const Ty& a) { DenseVector<Ty, uint_t, allocator_t> v2(v.size); //#pragma omp parallel for (DenseVector<Ty, uint_t, allocator_t>::size_type i = 0; i < v.size; ++i) { v2(i) = v.at(i) * a; } return v2; } /* Matrix divides vector. */ template<typename Ty, typename uint_t, typename allocator_t> DenseVector<Ty, uint_t, allocator_t> operator/( const DenseVector<Ty, uint_t, allocator_t> v, const Ty& a) { return v * Ty(1.0 / a); } /* Get a column of the matrix. Return a new vector (not reference). 
*/ template<typename Ty, typename uint_t, typename allocator_t> DenseVector<Ty, uint_t, allocator_t> get_column(const DenseMatrix<Ty, uint_t, allocator_t> &A, const typename DenseMatrix<Ty, uint_t, allocator_t>::size_type col) { DenseVector<Ty, uint_t, allocator_t> v = DenseVector<Ty, uint_t, allocator_t>(A.size); for (DenseVector<Ty, uint_t, allocator_t>::size_type i = 0; i < A.size; ++i) { v(i) = A(i, col); } return v; } constexpr double default_tolerance = 1e-3; template<typename Ty, typename uint_t, typename allocator_t> bool compare_vec_l2(const DenseVector<Ty, uint_t, allocator_t> &v1, const DenseVector<Ty, uint_t, allocator_t> &v2, Ty tolerance = default_tolerance) { assert(v1.size == v2.size, "Bad size"); assert(tolerance > 0, "Bad input."); if ((v1 - v2).norm2() < tolerance) return true; return false; } /* Compare two vectors. Tolerance = 1e-3. Return true if the difference of two vectors has norm2 less than the tolerance.*/ template<typename Ty, typename uint_t, typename allocator_t> bool operator==(const DenseVector<Ty, uint_t, allocator_t> &v1, const DenseVector<Ty, uint_t, allocator_t> &v2) { return compare_vec_l2(v1, v2, default_tolerance); } /* Compare two vectors. Tolerance = 1e-3. Return false if the difference of two vectors has norm2 less than the tolerance.*/ template<typename Ty, typename uint_t, typename allocator_t> bool operator!=(const DenseVector<Ty, uint_t, allocator_t> &v1, const DenseVector<Ty, uint_t, allocator_t> &v2) { return !(v1 == v2); } template<typename Ty, typename uint_t, typename allocator_t> bool compare_two_matrices(const DenseMatrix<Ty, uint_t, allocator_t> &m1, const DenseMatrix<Ty, uint_t, allocator_t> &m2, const Ty tolerance = default_tolerance) { assert(m1.size == m2.size, "Bad size."); assert(tolerance > 0, "Bad input."); for (DenseMatrix<Ty, uint_t, allocator_t>::size_type i = 0; i < m1.size * m1.size; ++i) { if (abs(m1.data[i] - m2.data[i]) > tolerance) return false; } return true; } /* Compare two matrices. 
Tolerance = 1e-3.*/ template<typename Ty, typename uint_t, typename allocator_t> bool operator==(const DenseMatrix<Ty, uint_t, allocator_t> &m1, const DenseMatrix<Ty, uint_t, allocator_t> &m2) { return compare_two_matrices(m1, m2); } /* Compare two matrices. Tolerance = 1e-3.*/ template<typename Ty, typename uint_t, typename allocator_t> bool operator!=(const DenseMatrix<Ty, uint_t, allocator_t> &m1, const DenseMatrix<Ty, uint_t, allocator_t> &m2) { return !(m1==m2); } /* Compare two vectors. Print the different elements. */ template<typename Ty, typename uint_t, typename allocator_t> void check_vec(const DenseVector<Ty, uint_t, allocator_t> &v1, const DenseVector<Ty, uint_t, allocator_t> &v2, const Ty tolerance = default_tolerance) { assert(v1.size == v2.size, "Bad size"); assert(tolerance > 0, "Bad input."); for (DenseVector<Ty, uint_t, allocator_t>::size_type i = 0; i < v1.size; ++i) { if (abs(v1(i) - v2(i)) > tolerance) { cout << "Bad: " << i << "\t" << "v1: " << v1(i) << "\tv2:" << v2(i) << endl; } } } /* Swap two rows in both a matrix and a vector (used in Gaussian elimination). */ template<typename Ty, typename uint_t, typename allocator_t> void swap_two_rows(DenseMatrix<Ty, uint_t, allocator_t>& A, DenseVector<Ty, uint_t, allocator_t>& b, typename DenseMatrix<Ty, uint_t, allocator_t>::size_type row1, typename DenseMatrix<Ty, uint_t, allocator_t>::size_type row2) { swap(b(row1), b(row2)); //#pragma omp parallel for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type i = 0; i < A.size; ++i) { swap(A(row1, i), A(row2, i)); } } /* Perform row elimination step (used in Gaussian elimination). 
*/ template<typename Ty, typename uint_t, typename allocator_t> void row_elimination(DenseMatrix<Ty, uint_t, allocator_t>& A, DenseVector<Ty, uint_t, allocator_t>& b, typename DenseMatrix<Ty, uint_t, allocator_t>::size_type row, typename DenseMatrix<Ty, uint_t, allocator_t>::size_type row2) { typename DenseMatrix<Ty, uint_t, allocator_t>::size_type size = A.size; assert(A(row, row) != 0 && A.size == b.size, "Bad Gaussian Solver."); if (A(row2, row) == 0) return; Ty coef = A(row2, row) / A(row, row); for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type j = row; j < size; ++j) { A(row2, j) -= (A(row, j)*coef); } b(row2) -= (b(row)*coef); } /* Perform column elimination step (used in Gaussian elimination). */ template<typename Ty, typename uint_t, typename allocator_t> void column_elimination(DenseMatrix<Ty, uint_t, allocator_t>& A, DenseVector<Ty, uint_t, allocator_t> &b, DenseVector<Ty, uint_t, allocator_t> &x) { static_assert(is_same_v<typename DenseMatrix<Ty, uint_t, allocator_t>::size_type, DenseVector<Ty, uint_t, allocator_t>::size_type>, "Bad type."); typename DenseMatrix<Ty, uint_t, allocator_t>::size_type size = A.size; x(size - 1) = b(size - 1) / A(size - 1, size - 1); for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type i = size - 2; i >= 0; --i) { Ty b0 = b(i); for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type j = i + 1; j < size; ++j) { b0 -= (A(i, j)*x(j)); } x(i) = b0 / A(i, i); } } /* Linear solver using gaussian elimination. Return the result vector. 
*/ template<typename Ty, typename uint_t, typename allocator_t> DenseVector<Ty, uint_t, allocator_t> gaussian_linear_solver(DenseMatrix<Ty, uint_t, allocator_t>& A, DenseVector<Ty, uint_t, allocator_t> &b) { static_assert(is_same_v< typename DenseMatrix<Ty, uint_t, allocator_t>::size_type, typename DenseVector<Ty, uint_t, allocator_t>::size_type>, "Bad type."); assert(A.size == b.size, "Bad size"); typename DenseMatrix<Ty, uint_t, allocator_t>::size_type n = A.size; DenseVector<Ty, uint_t, allocator_t> x(n); for (int i = 0; i < n; ++i) { // find maximum row Ty maximum = 0.0; typename DenseMatrix<Ty, uint_t, allocator_t>::size_type j_ = i; for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type j = i + 1; j < n; ++j) { if (abs(A(j, i)) > maximum) { j_ = j; maximum = abs(A(j, i)); } } if (A(j_, i) == 0) { DenseVector<Ty, uint_t, allocator_t> vi = get_column(A, i); cout << vi.norm2(); assert(false, "No solution."); } for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type j = i + 1; j < n; ++j) { row_elimination(A, b, i, j); } } column_elimination(A, b, x); return x; } /* Created a random matrix. */ template<typename Ty, typename uint_t = unsigned int, typename allocator_t = safe_allocator<Ty>> DenseMatrix<Ty, uint_t, allocator_t> randmat( typename DenseMatrix<Ty, uint_t, allocator_t>::size_type size, RandomEngine* rng = nullptr) { RandomEngine *ptr = rng; bool allocated = false; if (rng == nullptr) { ptr = new DefaultRandomEngine(); allocated = true; } RandomEngine& rand = *ptr; DenseMatrix<Ty, uint_t, allocator_t> m(size); for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type i = 0; i < size; ++i) { for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type j = 0; j < size; ++j) { m(i, j) = rand(); } } if (allocated) { delete ptr; } return m; } /* Create a random vector. 
*/ template<typename Ty, typename uint_t = unsigned int, typename allocator_t = safe_allocator<Ty>> DenseVector<Ty, uint_t, allocator_t> randvec( typename DenseVector<Ty, uint_t, allocator_t>::size_type size, RandomEngine *rng = nullptr) { RandomEngine* ptr = rng; bool allocated = false; if (rng == nullptr) { ptr = new DefaultRandomEngine(); allocated = true; } RandomEngine& rand = *ptr; DenseVector<Ty, uint_t, allocator_t> v(size); for (typename DenseMatrix<Ty, uint_t, allocator_t>::size_type i = 0; i < size; ++i) { v(i) = rand(); } if (allocated) { delete ptr; } return v; } /* Pick the elements over the threshold. Return the new vector. The count of picked elements is returned via the argument. */ template<typename Ty, typename uint_t, typename allocator_t> DenseVector<Ty, uint_t, allocator_t> pick_threshold( DenseVector<Ty, uint_t, allocator_t> &v, Ty threshold, size_t& counter) { DenseVector<Ty, uint_t, allocator_t> v2(v.size); assert(threshold > 0, "Bad input."); counter = 0; for (DenseVector<Ty, uint_t, allocator_t>::size_type i = 0; i < v.size; ++i) { Ty elem = v(i); if (abs(elem) > threshold) { counter++; v2(i) = elem; } } return v2; } ns_end
parallel-simple.c
/* * parallel-simple.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) { var++; } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}parallel-simple.c:23 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}parallel-simple.c:23 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
GB_binop__bget_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_int32) // A.*B function (eWiseMult): GB (_AemultB_08__bget_int32) // A.*B function (eWiseMult): GB (_AemultB_02__bget_int32) // A.*B function (eWiseMult): GB (_AemultB_04__bget_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bget_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int32) // C=scalar+B GB (_bind1st__bget_int32) // C=scalar+B' GB (_bind1st_tran__bget_int32) // C=A+scalar GB (_bind2nd__bget_int32) // C=A'+scalar GB (_bind2nd_tran__bget_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 
0 // BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, int32_t, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B when all three matrices are dense (disabled for BGET: the op is
// not one of MIN/MAX/PLUS/MINUS/RMINUS/TIMES/DIV/RDIV).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The actual loop body comes from the shared template, specialized via the
// GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bget_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable; the generator emits a second return after the
    // inner block returns unconditionally.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// disabled: no colscale kernel is generated for the BGET operator
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict
    Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// disabled: no rowscale kernel is generated for the BGET operator
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bget_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true (GxB_eWiseUnion)
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bget_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const
    int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bget_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bget_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bget_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \
}

GrB_Info GB (_bind1st_tran__bget_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \ } GrB_Info GB (_bind2nd_tran__bget_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ocp_nlp_common.c -- concatenation artifact: the following content belongs to a separate source file (acados). The bare filename line has been turned into a comment so it cannot be parsed as code. */
/*
 * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
 * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
 * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
 * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
 *
 * This file is part of acados.
 *
 * The 2-Clause BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.;
 */

#include "acados/ocp_nlp/ocp_nlp_common.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

// blasfeo
#include "blasfeo/include/blasfeo_common.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// hpipm
#include "hpipm/include/hpipm_d_ocp_qp_dim.h"
// acados
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
// openmp
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif

/************************************************
 * config
 ************************************************/

// Number of bytes needed for an ocp_nlp_config with horizon length N,
// including the per-stage dynamics/cost/constraints sub-configs.
// Must agree exactly with the layout written by ocp_nlp_config_assign.
acados_size_t ocp_nlp_config_calculate_size(int N)
{
    acados_size_t size = 0;

    // self
    size += sizeof(ocp_nlp_config);

    // qp solver
    size += 1 * ocp_qp_xcond_solver_config_calculate_size();

    // regularization
    size += ocp_nlp_reg_config_calculate_size();

    // dynamics: one config per shooting interval (N of them)
    size += N * sizeof(ocp_nlp_dynamics_config *);
    for (int i = 0; i < N; i++) size += ocp_nlp_dynamics_config_calculate_size();

    // cost: one config per stage (N+1 of them, including the terminal stage)
    size += (N + 1) * sizeof(ocp_nlp_cost_config *);
    for (int i = 0; i <= N; i++) size += ocp_nlp_cost_config_calculate_size();

    // constraints: one config per stage (N+1)
    size += (N + 1) * sizeof(ocp_nlp_constraints_config *);
    for (int i = 0; i <= N; i++) size += ocp_nlp_constraints_config_calculate_size();

    return size;
}

// Lay out an ocp_nlp_config inside caller-provided raw_memory (which must be
// at least ocp_nlp_config_calculate_size(N) bytes); returns the config.
ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    ocp_nlp_config *config = (ocp_nlp_config *) c_ptr;
    c_ptr += sizeof(ocp_nlp_config);

    config->N = N;

    // qp solver
    config->qp_solver =
    ocp_qp_xcond_solver_config_assign(c_ptr);
    c_ptr += ocp_qp_xcond_solver_config_calculate_size();

    // regularization
    config->regularize = ocp_nlp_reg_config_assign(c_ptr);
    c_ptr += ocp_nlp_reg_config_calculate_size();

    // dynamics
    config->dynamics = (ocp_nlp_dynamics_config **) c_ptr;
    c_ptr += N * sizeof(ocp_nlp_dynamics_config *);
    for (int i = 0; i < N; i++)
    {
        config->dynamics[i] = ocp_nlp_dynamics_config_assign(c_ptr);
        c_ptr += ocp_nlp_dynamics_config_calculate_size();
    }

    // cost
    config->cost = (ocp_nlp_cost_config **) c_ptr;
    c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *);
    for (int i = 0; i <= N; i++)
    {
        config->cost[i] = ocp_nlp_cost_config_assign(c_ptr);
        c_ptr += ocp_nlp_cost_config_calculate_size();
    }

    // constraints
    config->constraints = (ocp_nlp_constraints_config **) c_ptr;
    c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *);
    for (int i = 0; i <= N; i++)
    {
        config->constraints[i] = ocp_nlp_constraints_config_assign(c_ptr);
        c_ptr += ocp_nlp_constraints_config_calculate_size();
    }

    return config;
}

/************************************************
 * dims
 ************************************************/

// Bytes needed for the ocp_nlp_dims struct itself (without the per-module
// dims that ocp_nlp_dims_calculate_size adds on top).
static acados_size_t ocp_nlp_dims_calculate_size_self(int N)
{
    acados_size_t size = 0;

    size += sizeof(ocp_nlp_dims);

    // nlp sizes
    size += 6 * (N + 1) * sizeof(int);  // nv, nx, nu, ni, nz, ns

    // dynamics
    size += N * sizeof(void *);

    // cost
    size += (N + 1) * sizeof(void *);

    // constraints
    size += (N + 1) * sizeof(void *);

    // regularization
    size += ocp_nlp_reg_dims_calculate_size(N);
    size += sizeof(ocp_nlp_reg_dims);

    size += 8;  // initial align
    size += 8;  // intermediate align

    make_int_multiple_of(8, &size);

    return size;
}

// Total bytes for the full dims structure, including each module's dims.
acados_size_t ocp_nlp_dims_calculate_size(void *config_)
{
    ocp_nlp_config *config = config_;

    int N = config->N;

    acados_size_t size = 0;

    // self
    size += ocp_nlp_dims_calculate_size_self(N);

    // dynamics
    for (int i = 0; i < N; i++)
        size += config->dynamics[i]->dims_calculate_size(config->dynamics[i]);

    // cost
    for (int i = 0; i <= N; i++)
        size +=
    config->cost[i]->dims_calculate_size(config->cost[i]);

    // constraints
    for (int i = 0; i <= N; i++)
        size += config->constraints[i]->dims_calculate_size(config->constraints[i]);

    // qp solver
    size += config->qp_solver->dims_calculate_size(config->qp_solver, N);

    return size;
}

// Lay out the ocp_nlp_dims struct itself in raw_memory and zero-initialize
// all per-stage dimension arrays.
static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr;
    c_ptr += sizeof(ocp_nlp_dims);

    // dynamics
    dims->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    // cost
    dims->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // constraints
    dims->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // nv
    assign_and_advance_int(N + 1, &dims->nv, &c_ptr);
    // nx
    assign_and_advance_int(N + 1, &dims->nx, &c_ptr);
    // nu
    assign_and_advance_int(N + 1, &dims->nu, &c_ptr);
    // ni
    assign_and_advance_int(N + 1, &dims->ni, &c_ptr);
    // nz
    assign_and_advance_int(N + 1, &dims->nz, &c_ptr);
    // ns
    assign_and_advance_int(N + 1, &dims->ns, &c_ptr);

    // intermediate align
    align_char_to(8, &c_ptr);

    // regularization
    dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr);
    c_ptr += ocp_nlp_reg_dims_calculate_size(N);

    /* initialize qp_solver dimensions */
    // dims->qp_solver->N = N;
    // for (int i = 0; i <= N; i++)
    // {
    // TODO(dimitris): values below are needed for reformulation of QP when soft constraints
    // are not supported. Make this a bit more transparent as it clashes with nbx/nbu above.
    // dims->qp_solver->nsbx[i] = 0;
    // dims->qp_solver->nsbu[i] = 0;
    // dims->qp_solver->nsg[i] = 0;
    // }

    // N
    dims->N = N;

    // initialize dimensions to zero by default
    // nv
    for(int i=0; i<=N; i++)
        dims->nv[i] = 0;
    // nx
    for(int i=0; i<=N; i++)
        dims->nx[i] = 0;
    // nu
    for(int i=0; i<=N; i++)
        dims->nu[i] = 0;
    // ni
    for(int i=0; i<=N; i++)
        dims->ni[i] = 0;
    // nz
    for(int i=0; i<=N; i++)
        dims->nz[i] = 0;
    // ns
    for(int i=0; i<=N; i++)
        dims->ns[i] = 0;
    // TODO initialize dims to zero by default also in modules !!!!!!!

    // assert
    assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr);

    return dims;
}

// Lay out the complete dims structure (self + one dims object per module
// per stage + qp solver dims) inside raw_memory.
ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory)
{
    ocp_nlp_config *config = config_;

    int N = config->N;

    char *c_ptr = (char *) raw_memory;

    // self
    ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr);
    c_ptr += ocp_nlp_dims_calculate_size_self(N);

    // dynamics
    for (int i = 0; i < N; i++)
    {
        dims->dynamics[i] = config->dynamics[i]->dims_assign(config->dynamics[i], c_ptr);
        c_ptr += config->dynamics[i]->dims_calculate_size(config->dynamics[i]);
    }

    // cost
    for (int i = 0; i <= N; i++)
    {
        dims->cost[i] = config->cost[i]->dims_assign(config->cost[i], c_ptr);
        c_ptr += config->cost[i]->dims_calculate_size(config->cost[i]);
    }

    // constraints
    for (int i = 0; i <= N; i++)
    {
        dims->constraints[i] = config->constraints[i]->dims_assign(config->constraints[i], c_ptr);
        c_ptr += config->constraints[i]->dims_calculate_size(config->constraints[i]);
    }

    // qp solver
    dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr);
    c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N);

    // assert
    assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr);

    return dims;
}

// Set an optimization-variable dimension ("nx", "nu", "nz" or "ns") for all
// stages at once, propagating the value to every sub-module (cost, dynamics,
// constraints, qp solver, regularization) and keeping nv = nu + nx + 2*ns
// consistent. value_array must hold N+1 ints (one per stage).
void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field, const void* value_array)
{
    // to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int N =
    config->N;

    int *int_array = (int *) value_array;

    /* set ocp_nlp dimension */
    if (!strcmp(field, "nx"))
    {
        // opt var
        for (int i = 0; i <= N; i++)
        {
            // set nx
            dims->nx[i] = int_array[i];
            // update nv
            dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nx", &int_array[i]);
        }
        // dynamics
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx", &int_array[i]);
        }
        // "nx1" of interval i is the state dimension at the *next* stage i+1
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx1", &int_array[i+1]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nx", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nx", &int_array[i]);
        }
        // regularization
        for (int i = 0; i <= N; i++)
        {
            config->regularize->dims_set(config->regularize, dims->regularize, i, "nx", &int_array[i]);
        }
    }
    else if (!strcmp(field, "nu"))
    {
        // nlp opt var
        for (int i = 0; i <= N; i++)
        {
            // set nu
            dims->nu[i] = int_array[i];
            // update nv
            dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nu", &int_array[i]);
        }
        // dynamics
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu", &int_array[i]);
        }
        // "nu1" of interval i is the input dimension at the next stage i+1
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu1", &int_array[i+1]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nu", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nu", &int_array[i]);
        }
        // regularization
        for (int i = 0; i <= N; i++)
        {
            config->regularize->dims_set(config->regularize, dims->regularize, i, "nu", &int_array[i]);
        }
    }
    else if (!strcmp(field, "nz"))
    {
        // nlp opt var
        for (int i = 0; i <= N; i++)
        {
            // set nz (algebraic variables; do not enter nv)
            dims->nz[i] = int_array[i];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nz", &int_array[i]);
        }
        // dynamics
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nz", &int_array[i]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nz", &int_array[i]);
        }
    }
    else if (!strcmp(field, "ns"))
    {
        // nlp opt var
        for (int i = 0; i <= N; i++)
        {
            // set ns
            dims->ns[i] = int_array[i];
            // update nv
            dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "ns", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ns", &int_array[i]);
        }
    }
    else
    {
        printf("error: dims type not available in module ocp_nlp: %s", field);
        exit(1);
    }

#if 0
    /* set ocp_nlp submodule dimensions */
    if (strcmp(field, "ns"))  // dynamics do not contain slack/soft constraints
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], field, &int_array[i]);
        }
    }

    if (!strcmp(field, "nu"))
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu1", &int_array[i+1]);
        }
    }
    if (!strcmp(field, "nx"))
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx1", &int_array[i+1]);
        }
    }

    for (int i = 0; i <= N; i++)  // cost
    {
        config->cost[i]->dims_set(config->cost[i], dims->cost[i], field, &int_array[i]);
    }

    for (int i = 0; i <= N; i++)  // constraints
    {
        config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], field, &int_array[i]);
    }

    if (strcmp(field, "nz"))  // qp_solver does not contain nz
    {
        for (int i = 0; i <= N; i++)  // qp_solver
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, &int_array[i]);
        }
    }
#endif
}

// Set one constraint dimension (nbx, nbu, ng, nh, nphi, their soft/equality
// variants, ...) at a single stage, then refresh the derived quantities:
// ni in the nlp dims, and the corresponding qp-solver/regularization dims.
void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field, const void* value_)
{
    // to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int *int_value = (int *) value_;

    int i = stage;

    // set in constraint module
    config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], field, int_value);

    // update ni in ocp_nlp dimensions
    config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "ni", &dims->ni[i]);

    // update qp_solver dims
    if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) )
    {
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
        // regularization
        config->regularize->dims_set(config->regularize, dims->regularize, i, (char *) field, int_value);
    }
    else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) )
    {
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
    }
    else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi")))
    {
        // general linear / nonlinear / convex-over-nonlinear constraints all
        // map onto the qp solver's general-constraint count "ng"
        // update ng_qp_solver in qp_solver
        int ng_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "ng_qp_solver", &ng_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ng", &ng_qp_solver);
        // regularization
        config->regularize->dims_set(config->regularize, dims->regularize, i, "ng", &ng_qp_solver);
    }
    else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi")))
    {
        // update ng_qp_solver in qp_solver
        int nsg_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nsg_qp_solver", &nsg_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nsg", &nsg_qp_solver);
    }
    else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) )
    {
        // equality bounds: forwarded to the qp solver unchanged
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
    }
    else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie")))
    {
        // update ng_qp_solver in qp_solver
        int ng_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nge_qp_solver", &ng_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nge", &ng_qp_solver);
    }
}

// Set one cost dimension (e.g. ny) at a single stage; delegated entirely to
// the stage's cost module.
void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage, const char *field, const void* value_)
{
    // to set dimension ny (output)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int *int_value = (int *) value_;

    config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value);
}

// Set one dynamics dimension at a single stage; delegated to the stage's
// dynamics module (mainly used for gnsf dimensions).
void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage, const char *field, const void* value)
{
    // mainly for gnsf dimensions
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int *int_value = (int *) value;

    config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage], field, int_value);
}

/************************************************
 * in
 ************************************************/

// Bytes for the ocp_nlp_in struct itself (Ts array + per-stage model
// pointers), excluding the per-module model memory.
acados_size_t ocp_nlp_in_calculate_size_self(int N)
{
    acados_size_t size = sizeof(ocp_nlp_in);

    size += N * sizeof(double);  // Ts

    size += N * sizeof(void *);  // dynamics

    size += (N + 1) * sizeof(void *);  // cost

    size += (N + 1) * sizeof(void *);  // constraints

    return size;
}

// Total bytes for an ocp_nlp_in, including every module's model data.
acados_size_t ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
    int N = dims->N;

    acados_size_t size = ocp_nlp_in_calculate_size_self(N);

    // dynamics
    for (int i = 0; i < N; i++)
    {
        size += config->dynamics[i]->model_calculate_size(config->dynamics[i], dims->dynamics[i]);
    }

    // cost
    for (int i = 0; i <= N; i++)
    {
        size += config->cost[i]->model_calculate_size(config->cost[i], dims->cost[i]);
    }

    // constraints
    for (int i = 0; i <= N; i++)
    {
        size += config->constraints[i]->model_calculate_size(config->constraints[i], dims->constraints[i]);
    }

    size += 8;  // initial align
    size += 8;  // final align

    // make_int_multiple_of(64, &size);

    return size;
}

// Lay out the ocp_nlp_in struct itself inside raw_memory.
ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_in *in = (ocp_nlp_in *) c_ptr;
    c_ptr += sizeof(ocp_nlp_in);

    // Ts
    assign_and_advance_double(N, &in->Ts, &c_ptr);

    // dynamics
    in->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    // cost
    in->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // constraints
    in->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    align_char_to(8, &c_ptr);

    return in;
}

// Lay out the full ocp_nlp_in (self + each module's model) in raw_memory.
ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
    int N = dims->N;

    char *c_ptr = (char *) raw_memory;

    // struct
    ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr);
    c_ptr += ocp_nlp_in_calculate_size_self(N);

    // dynamics
    for (int i = 0; i < N; i++)
    {
        in->dynamics[i] = config->dynamics[i]->model_assign(config->dynamics[i], dims->dynamics[i], c_ptr);
        c_ptr += config->dynamics[i]->model_calculate_size(config->dynamics[i], dims->dynamics[i]);
    }

    // cost
    for (int i = 0; i <= N; i++)
    {
        in->cost[i] = config->cost[i]->model_assign(config->cost[i], dims->cost[i], c_ptr);
        c_ptr += config->cost[i]->model_calculate_size(config->cost[i], dims->cost[i]);
    }

    // constraints
    for (int i = 0; i <= N; i++)
    {
        in->constraints[i] = config->constraints[i]->model_assign(config->constraints[i], dims->constraints[i], c_ptr);
        c_ptr += config->constraints[i]->model_calculate_size(config->constraints[i], dims->constraints[i]);
    }

    assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr);

    return in;
}

/************************************************
 * out
 ************************************************/

// Bytes for an ocp_nlp_out: primal (ux), algebraic (z), dynamics multipliers
// (pi, one per interval), and inequality multipliers/slacks (lam, t).
acados_size_t ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    acados_size_t size = sizeof(ocp_nlp_out);

    size += 4 * (N + 1) * sizeof(struct blasfeo_dvec);  // ux, lam, t, z
    size += 1 * N * sizeof(struct blasfeo_dvec);        // pi

    for (int i = 0; i < N; i++)
    {
        size += 1 * blasfeo_memsize_dvec(nv[i]);        // ux
        size += 1 * blasfeo_memsize_dvec(nz[i]);        // z
        size += 2 * blasfeo_memsize_dvec(2 * ni[i]);    // lam, t
        size += 1 * blasfeo_memsize_dvec(nx[i + 1]);    // pi
    }
    size += 1 * blasfeo_memsize_dvec(nv[N]);            // ux
    size += 1 * blasfeo_memsize_dvec(nz[N]);            // z
    size += 2 * blasfeo_memsize_dvec(2 * ni[N]);        // lam, t

    size += 8;   // initial align
    size += 8;   // blasfeo_struct align
    size += 64;  // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}

// Lay out an ocp_nlp_out in raw_memory and zero all solution vectors.
ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_out *out = (ocp_nlp_out *) c_ptr;
    c_ptr += sizeof(ocp_nlp_out);

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // blasfeo_dvec_struct
    // ux
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr);
    // z
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr);
    // pi
    assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr);
    // lam
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr);
    // t
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr);

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // blasfeo_dvec
    // ux
    for (int i = 0; i <= N; ++i)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[i], out->ux + i, &c_ptr);
    }
    // z
    for (int i = 0; i <= N; ++i)
    {
        assign_and_advance_blasfeo_dvec_mem(nz[i], out->z + i, &c_ptr);
    }
    // pi
    for (int i = 0; i < N; ++i)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[i + 1], out->pi + i, &c_ptr);
    }
    // lam
    for (int i = 0; i <= N; ++i)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[i], out->lam + i, &c_ptr);
    }
    // t
    for (int i = 0; i <= N; ++i)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[i], out->t + i, &c_ptr);
    }

    // zero solution
    for(int i=0; i<N; i++)
    {
        blasfeo_dvecse(nv[i], 0.0, out->ux+i, 0);
        blasfeo_dvecse(nz[i], 0.0, out->z+i, 0);
        blasfeo_dvecse(nx[i+1], 0.0, out->pi+i, 0);
        blasfeo_dvecse(2*ni[i], 0.0, out->lam+i, 0);
        blasfeo_dvecse(2*ni[i], 0.0, out->t+i, 0);
    }
    // terminal stage N has no pi
    blasfeo_dvecse(nv[N], 0.0, out->ux+N, 0);
    blasfeo_dvecse(nz[N], 0.0, out->z+N, 0);
    blasfeo_dvecse(2*ni[N], 0.0, out->lam+N, 0);
    blasfeo_dvecse(2*ni[N], 0.0, out->t+N, 0);

    assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr);

    return out;
}

/************************************************
 * options
 ************************************************/

// Bytes for the nlp options struct plus every sub-module's options.
acados_size_t ocp_nlp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    acados_size_t size = 0;

    size += sizeof(ocp_nlp_opts);

    size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    size += config->regularize->opts_calculate_size();

    // dynamics
    size += N * sizeof(void *);
    for (int i = 0; i < N; i++)
    {
        size += dynamics[i]->opts_calculate_size(dynamics[i], dims->dynamics[i]);
    }

    // cost
    size += (N + 1) * sizeof(void *);
    for (int i = 0; i <= N; i++)
    {
        size += cost[i]->opts_calculate_size(cost[i], dims->cost[i]);
    }

    // constraints
    size += (N + 1) * sizeof(void *);
    for (int i = 0; i <= N; i++)
    {
        size += constraints[i]->opts_calculate_size(constraints[i],
dims->constraints[i]); } size += 2*8; // 2 aligns return size; } void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; char *c_ptr = (char *) raw_memory; align_char_to(8, &c_ptr); ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr; c_ptr += sizeof(ocp_nlp_opts); /* pointers to substructures */ opts->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); opts->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); opts->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); align_char_to(8, &c_ptr); /* substructures */ opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr); c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver); opts->regularize = config->regularize->opts_assign(c_ptr); c_ptr += config->regularize->opts_calculate_size(); // dynamics for (int i = 0; i < N; i++) { opts->dynamics[i] = dynamics[i]->opts_assign(dynamics[i], dims->dynamics[i], c_ptr); c_ptr += dynamics[i]->opts_calculate_size(dynamics[i], dims->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { opts->cost[i] = cost[i]->opts_assign(cost[i], dims->cost[i], c_ptr); c_ptr += cost[i]->opts_calculate_size(cost[i], dims->cost[i]); } // constraints for (int i = 0; i <= N; i++) { opts->constraints[i] = constraints[i]->opts_assign(constraints[i], dims->constraints[i], c_ptr); c_ptr += constraints[i]->opts_calculate_size(constraints[i], dims->constraints[i]); } assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr); return opts; } void ocp_nlp_opts_initialize_default(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_opts *opts = opts_; 
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; ocp_nlp_reg_config *regularize = config->regularize; int N = dims->N; opts->reuse_workspace = 1; #if defined(ACADOS_WITH_OPENMP) #if defined(ACADOS_NUM_THREADS) opts->num_threads = ACADOS_NUM_THREADS; // printf("\nocp_nlp: openmp threads from macro = %d\n", opts->num_threads); #else opts->num_threads = omp_get_max_threads(); // printf("\nocp_nlp: omp_get_max_threads %d", omp_get_max_threads()); #endif #endif // printf("\nocp_nlp: openmp threads = %d\n", opts->num_threads); opts->globalization = FIXED_STEP; opts->print_level = 0; opts->step_length = 1.0; opts->levenberg_marquardt = 0.0; /* submodules opts */ // qp solver qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize); // dynamics for (int i = 0; i < N; i++) { dynamics[i]->opts_initialize_default(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { cost[i]->opts_initialize_default(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { constraints[i]->opts_initialize_default(constraints[i], dims->constraints[i], opts->constraints[i]); } // globalization opts->alpha_min = 0.05; opts->alpha_reduction = 0.7; opts->full_step_dual = 0; opts->line_search_use_sufficient_descent = 0; opts->globalization_use_SOC = 0; opts->eps_sufficient_descent = 1e-4; // Leineweber1999: MUSCOD-I eps_T = 1e-4 (p.89); Note: eps_T = 0.1 originally proposed by Powell 1978 (Leineweber 1999, p. 
53) return; } void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { dynamics[i]->opts_update(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { cost[i]->opts_update(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { constraints[i]->opts_update(constraints[i], dims->constraints[i], opts->constraints[i]); } return; } void ocp_nlp_opts_set(void *config_, void *opts_, const char *field, void* value) { ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_; ocp_nlp_config *config = config_; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name, i.e. 
substring in field before '_' char *char_ = strchr(field, '_'); if (char_!=NULL) { module_length = char_-field; for (int i=0; i<module_length; i++) module[i] = field[i]; module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to QP module if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) ) { config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value); } else // nlp opts { if (!strcmp(field, "reuse_workspace")) { int* reuse_workspace = (int *) value; opts->reuse_workspace = *reuse_workspace; } else if (!strcmp(field, "num_threads")) { int* num_threads = (int *) value; opts->num_threads = *num_threads; } else if (!strcmp(field, "step_length")) { double* step_length = (double *) value; opts->step_length = *step_length; } else if (!strcmp(field, "alpha_reduction")) { double* alpha_reduction = (double *) value; opts->alpha_reduction = *alpha_reduction; } else if (!strcmp(field, "alpha_min")) { double* alpha_min = (double *) value; opts->alpha_min = *alpha_min; } else if (!strcmp(field, "eps_sufficient_descent")) { double* eps_sufficient_descent = (double *) value; opts->eps_sufficient_descent = *eps_sufficient_descent; } else if (!strcmp(field, "full_step_dual")) { int* full_step_dual = (int *) value; opts->full_step_dual = *full_step_dual; } else if (!strcmp(field, "line_search_use_sufficient_descent")) { int* line_search_use_sufficient_descent = (int *) value; opts->line_search_use_sufficient_descent = *line_search_use_sufficient_descent; } else if (!strcmp(field, "globalization_use_SOC")) { int* globalization_use_SOC = (int *) value; opts->globalization_use_SOC = *globalization_use_SOC; } else if (!strcmp(field, "globalization")) { char* globalization = (char *) value; if (!strcmp(globalization, "fixed_step")) { opts->globalization = FIXED_STEP; } else if (!strcmp(globalization, "merit_backtracking")) { opts->globalization = MERIT_BACKTRACKING; } else { printf("\nerror: ocp_nlp_opts_set: not 
supported value for globalization, got: %s\n", globalization); exit(1); } } else if (!strcmp(field, "levenberg_marquardt")) { double* levenberg_marquardt = (double *) value; opts->levenberg_marquardt = *levenberg_marquardt; } else if (!strcmp(field, "exact_hess")) { int N = config->N; // cost for (int i=0; i<=N; i++) config->cost[i]->opts_set(config->cost[i], opts->cost[i], "exact_hess", value); // dynamics for (int i=0; i<N; i++) config->dynamics[i]->opts_set(config->dynamics[i], opts->dynamics[i], "compute_hess", value); // constraints for (int i=0; i<=N; i++) config->constraints[i]->opts_set(config->constraints[i], opts->constraints[i], "compute_hess", value); } // selectively turn on exact hessian contributions else if (!strcmp(field, "exact_hess_cost")) { int N = config->N; for (int i=0; i<=N; i++) config->cost[i]->opts_set(config->cost[i], opts->cost[i], "exact_hess", value); } else if (!strcmp(field, "exact_hess_dyn")) { int N = config->N; for (int i=0; i<N; i++) config->dynamics[i]->opts_set(config->dynamics[i], opts->dynamics[i], "compute_hess", value); } else if (!strcmp(field, "exact_hess_constr")) { int N = config->N; for (int i=0; i<=N; i++) config->constraints[i]->opts_set(config->constraints[i], opts->constraints[i], "compute_hess", value); } else if (!strcmp(field, "print_level")) { int* print_level = (int *) value; if (*print_level < 0) { printf("\nerror: ocp_nlp_opts_set: invalid value for print_level field, need int >=0, got %d.\n", *print_level); exit(1); } opts->print_level = *print_level; } else { printf("\nerror: ocp_nlp_opts_set: wrong field: %s\n", field); exit(1); } } return; } void ocp_nlp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value) { ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_; ocp_nlp_config *config = config_; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name char *char_ = strchr(field, '_'); if (char_!=NULL) { module_length = char_-field; for 
(int i=0; i<module_length; i++) module[i] = field[i]; module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to dynamics module if ( ptr_module!=NULL && (!strcmp(ptr_module, "dynamics")) ) { config->dynamics[stage]->opts_set( config->dynamics[stage], opts->dynamics[stage], field+module_length+1, value ); } // pass options to cost module else if ( ptr_module!=NULL && (!strcmp(ptr_module, "cost")) ) { config->cost[stage]->opts_set( config->cost[stage], opts->cost[stage], field+module_length+1, value); } // pass options to constraint module else if ( ptr_module!=NULL && (!strcmp(ptr_module, "constraints")) ) { config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage], (char *) field+module_length+1, value); } else { printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field); exit(1); } return; } /************************************************ * memory ************************************************/ acados_size_t ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts) { ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nz = dims->nz; int *nu = dims->nu; int *ni = dims->ni; acados_size_t size = sizeof(ocp_nlp_memory); // qp in size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); // qp solver size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // dynamics size += N * sizeof(void *); for (int i = 0; i < N; i++) { size += 
dynamics[i]->memory_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost size += (N + 1) * sizeof(void *); for (int i = 0; i <= N; i++) { size += cost[i]->memory_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints size += (N + 1) * sizeof(void *); for (int i = 0; i <= N; i++) { size += constraints[i]->memory_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } // nlp res size += ocp_nlp_res_calculate_size(dims); size += (N+1)*sizeof(bool); // set_sim_guess size += (N+1)*sizeof(struct blasfeo_dmat); // dzduxt size += 6*(N+1)*sizeof(struct blasfeo_dvec); // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg size += 1*N*sizeof(struct blasfeo_dvec); // dyn_fun for (int i = 0; i < N; i++) { size += 1*blasfeo_memsize_dmat(nu[i]+nx[i], nz[i]); // dzduxt size += 1*blasfeo_memsize_dvec(nz[i]); // z_alg size += 2*blasfeo_memsize_dvec(nv[i]); // cost_grad ineq_adj size += 1*blasfeo_memsize_dvec(nu[i] + nx[i]); // dyn_adj size += 1*blasfeo_memsize_dvec(nx[i + 1]); // dyn_fun size += 1*blasfeo_memsize_dvec(2 * ni[i]); // ineq_fun size += 1*blasfeo_memsize_dvec(nx[i] + nz[i]); // sim_guess } size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]); // dzduxt size += 1*blasfeo_memsize_dvec(nz[N]); // z_alg size += 2*blasfeo_memsize_dvec(nv[N]); // cost_grad ineq_adj size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]); // dyn_adj size += 1*blasfeo_memsize_dvec(2 * ni[N]); // ineq_fun size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]); // sim_guess size += 8; // initial align size += 8; // middle align size += 8; // blasfeo_struct align size += 64; // blasfeo_mem align make_int_multiple_of(8, &size); return size; } ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, void *raw_memory) { ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints 
= config->constraints; // extract sizes int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nz = dims->nz; int *nu = dims->nu; int *ni = dims->ni; char *c_ptr = (char *) raw_memory; // initial align align_char_to(8, &c_ptr); // struct ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_memory); /* pointers to substructures */ // dynamics mem->dynamics = (void **) c_ptr; c_ptr += N*sizeof(void *); // cost mem->cost = (void **) c_ptr; c_ptr += (N+1)*sizeof(void *); // constraints mem->constraints = (void **) c_ptr; c_ptr += (N+1)*sizeof(void *); // middle align align_char_to(8, &c_ptr); /* substructures */ // qp in mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); // QP solver mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr); c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr); c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // dynamics for (int i = 0; i < N; i++) { mem->dynamics[i] = dynamics[i]->memory_assign(dynamics[i], dims->dynamics[i], opts->dynamics[i], c_ptr); c_ptr += dynamics[i]->memory_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { mem->cost[i] = cost[i]->memory_assign(cost[i], dims->cost[i], opts->cost[i], c_ptr); c_ptr += cost[i]->memory_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { mem->constraints[i] = constraints[i]->memory_assign(constraints[i], dims->constraints[i], opts->constraints[i], c_ptr); 
c_ptr += constraints[i]->memory_calculate_size( constraints[i], dims->constraints[i], opts->constraints[i]); } // nlp res mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr); c_ptr += mem->nlp_res->memsize; // blasfeo_struct align align_char_to(8, &c_ptr); // dzduxt assign_and_advance_blasfeo_dmat_structs(N + 1, &mem->dzduxt, &c_ptr); // z_alg assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->z_alg, &c_ptr); // cost_grad assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr); // ineq_fun assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr); // ineq_adj assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr); // dyn_fun assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr); // dyn_adj assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr); // sim_guess assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr); // set_sim_guess assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr); for (int i = 0; i <= N; ++i) { mem->set_sim_guess[i] = false; } // blasfeo_mem align align_char_to(64, &c_ptr); // dzduxt for (int i=0; i<=N; i++) { assign_and_advance_blasfeo_dmat_mem(nu[i]+nx[i], nz[i], mem->dzduxt+i, &c_ptr); } // z_alg for (int i=0; i<=N; i++) { blasfeo_create_dvec(nz[i], mem->z_alg+i, c_ptr); c_ptr += blasfeo_memsize_dvec(nz[i]); } // cost_grad for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(nv[i], mem->cost_grad + i, &c_ptr); } // ineq_fun for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(2 * ni[i], mem->ineq_fun + i, &c_ptr); } // ineq_adj for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(nv[i], mem->ineq_adj + i, &c_ptr); } // dyn_fun for (int i = 0; i < N; i++) { assign_and_advance_blasfeo_dvec_mem(nx[i + 1], mem->dyn_fun + i, &c_ptr); } // dyn_adj for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(nu[i] + nx[i], mem->dyn_adj + i, &c_ptr); } // sim_guess for (int i = 0; i <= N; i++) { 
assign_and_advance_blasfeo_dvec_mem(nx[i] + nz[i], mem->sim_guess + i, &c_ptr); // set to 0; blasfeo_dvecse(nx[i] + nz[i], 0.0, mem->sim_guess+i, 0); // printf("sim_guess i %d: %p\n", i, mem->sim_guess+i); } // printf("created memory %p\n", mem); return mem; } /************************************************ * workspace ************************************************/ acados_size_t ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts) { ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; acados_size_t size = 0; // nlp size += sizeof(ocp_nlp_workspace); // tmp_nlp_out size += ocp_nlp_out_calculate_size(config, dims); // weight_merit_fun size += ocp_nlp_out_calculate_size(config, dims); // blasfeo_dvec int nxu_max = 0; int nx_max = 0; int ni_max = 0; for (int i = 0; i <= N; i++) { nx_max = nx_max > nx[i] ? nx_max : nx[i]; nxu_max = nxu_max > (nx[i]+nu[i]) ? nxu_max : (nx[i]+nu[i]); ni_max = ni_max > ni[i] ? 
ni_max : ni[i]; } size += 1 * blasfeo_memsize_dvec(nx_max); size += 1 * blasfeo_memsize_dvec(nxu_max); size += 1 * blasfeo_memsize_dvec(ni_max); // array of pointers // cost size += (N+1)*sizeof(void *); // dynamics size += N*sizeof(void *); // constraints size += (N+1)*sizeof(void *); // module workspace if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { size += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { size += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { size += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } #else acados_size_t size_tmp = 0; int tmp; // qp solver tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (int i = 0; i < N; i++) { tmp = dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (int i = 0; i <= N; i++) { tmp = cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // constraints for (int i = 0; i <= N; i++) { tmp = constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); size_tmp = tmp > size_tmp ? 
tmp : size_tmp; } size += size_tmp; #endif } else { // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { size += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { size += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { size += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } } size += 8; // struct align return size; } ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory) { ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; int *nx = dims->nx; // int *nv = dims->nv; int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; char *c_ptr = (char *) raw_memory; ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr; c_ptr += sizeof(ocp_nlp_workspace); /* pointers to substructures */ // work->dynamics = (void **) c_ptr; c_ptr += N*sizeof(void *); // work->cost = (void **) c_ptr; c_ptr += (N+1)*sizeof(void *); // work->constraints = (void **) c_ptr; c_ptr += (N+1)*sizeof(void *); align_char_to(8, &c_ptr); /* substructures */ // tmp_nlp_out work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr); c_ptr += ocp_nlp_out_calculate_size(config, dims); // weight_merit_fun work->weight_merit_fun = ocp_nlp_out_assign(config, dims, c_ptr); c_ptr += ocp_nlp_out_calculate_size(config, dims); // blasfeo_dvec int nxu_max = 0; int nx_max = 0; int ni_max = 0; for (int i = 0; i <= N; i++) { nx_max = nx_max > nx[i] ? nx_max : nx[i]; nxu_max = nxu_max > (nx[i]+nu[i]) ? 
nxu_max : (nx[i]+nu[i]); ni_max = ni_max > ni[i] ? ni_max : ni[i]; } assign_and_advance_blasfeo_dvec_mem(nxu_max, &work->tmp_nxu, &c_ptr); assign_and_advance_blasfeo_dvec_mem(ni_max, &work->tmp_ni, &c_ptr); assign_and_advance_blasfeo_dvec_mem(nx_max, &work->dxnext_dy, &c_ptr); if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver work->qp_work = (void *) c_ptr; c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { work->dynamics[i] = c_ptr; c_ptr += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { work->cost[i] = c_ptr; c_ptr += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { work->constraints[i] = c_ptr; c_ptr += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } #else acados_size_t size_tmp = 0; int tmp; // qp solver work->qp_work = (void *) c_ptr; tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (int i = 0; i < N; i++) { work->dynamics[i] = c_ptr; tmp = dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (int i = 0; i <= N; i++) { work->cost[i] = c_ptr; tmp = cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // constraints for (int i = 0; i <= N; i++) { work->constraints[i] = c_ptr; tmp = constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); size_tmp = tmp > size_tmp ? 
tmp : size_tmp; } c_ptr += size_tmp; #endif } else { // qp solver work->qp_work = (void *) c_ptr; c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { work->dynamics[i] = c_ptr; c_ptr += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { work->cost[i] = c_ptr; c_ptr += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { work->constraints[i] = c_ptr; c_ptr += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } } assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr); return work; } /************************************************ * functions ************************************************/ void ocp_nlp_alias_memory_to_submodules(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in, ocp_nlp_out *nlp_out, ocp_nlp_opts *opts, ocp_nlp_memory *nlp_mem, ocp_nlp_workspace *nlp_work) { #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel { // beginning of parallel region #endif int N = dims->N; // alias to dynamics_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (int i = 0; i < N; i++) { config->dynamics[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_ux1_ptr(nlp_out->ux+i+1, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+i+1, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_pi_ptr(nlp_out->pi+i, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+i, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+i, nlp_mem->dynamics[i]); 
config->dynamics[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+i, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+i, nlp_mem->set_sim_guess+i, nlp_mem->dynamics[i]); config->dynamics[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i, nlp_mem->dynamics[i]); } #pragma omp parallel for for (int i = 0; i < N; i++) { i+=1; } // alias to cost_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (int i = 0; i <= N; i++) { config->cost[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->cost[i]); config->cost[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->cost[i]); config->cost[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i, nlp_mem->cost[i]); config->cost[i]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+i, nlp_mem->cost[i]); config->cost[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->cost[i]); config->cost[i]->memory_set_Z_ptr(nlp_mem->qp_in->Z+i, nlp_mem->cost[i]); } // alias to constraints_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (int i = 0; i <= N; i++) { config->constraints[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_lam_ptr(nlp_out->lam+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[i], nlp_mem->constraints[i]); 
config->constraints[i]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[i], nlp_mem->constraints[i]); config->constraints[i]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[i], nlp_mem->constraints[i]); } // alias to regularize memory config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem); config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem); config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem); config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem); config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem); config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem); config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem); config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem); config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem); // copy sampling times into dynamics model #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute; // -> remove here and make sure precompute is called everywhere (e.g. Python interface). 
for (int i = 0; i < N; i++) { config->dynamics[i]->model_set(config->dynamics[i], dims->dynamics[i], nlp_in->dynamics[i], "T", nlp_in->Ts+i); } #if defined(ACADOS_WITH_OPENMP) } // end of parallel region #endif return; } void ocp_nlp_initialize_qp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int N = dims->N; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // cost config->cost[i]->initialize(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); // dynamics if (i < N) config->dynamics[i]->initialize(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]); // constraints config->constraints[i]->initialize(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); } return; } void ocp_nlp_initialize_t_slacks(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { struct blasfeo_dvec *ineq_fun; int N = dims->N; int *ni = dims->ni; int *ns = dims->ns; int *nx = dims->nx; int *nu = dims->nu; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // copy out->ux to tmp_nlp_out->ux, since this is used in compute_fun blasfeo_dveccp(nx[i]+nu[i]+2*ns[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); // evaluate inequalities config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); // t = -ineq_fun blasfeo_dveccpsc(2 * ni[i], -1.0, ineq_fun, 0, out->t + i, 0); } return; } void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in 
*in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; /* stage-wise multiple shooting lagrangian evaluation */ #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // init Hessian to 0 blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0); if (i < N) { // Levenberg Marquardt term: Ts[i] * levenberg_marquardt * eye() if (opts->levenberg_marquardt > 0.0) blasfeo_ddiare(nu[i] + nx[i], in->Ts[i] * opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0); // dynamics config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]); } else { // Levenberg Marquardt term: 1.0 * levenberg_marquardt * eye() if (opts->levenberg_marquardt > 0.0) blasfeo_ddiare(nu[i] + nx[i], opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0); } // cost config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); // constraints config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); } /* collect stage-wise evaluations */ #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i=0; i <= N; i++) { // nlp mem: cost_grad struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]); blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0); // nlp mem: dyn_fun if (i < N) { struct blasfeo_dvec *dyn_fun = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]); blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0); } // nlp mem: dyn_adj if (i < N) { struct blasfeo_dvec *dyn_adj = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]); blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + 
i, 0); } else { blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0); } if (i > 0) { struct blasfeo_dvec *dyn_adj = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]); blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i], mem->dyn_adj+i, nu[i]); } // nlp mem: ineq_fun struct blasfeo_dvec *ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0); // nlp mem: ineq_adj struct blasfeo_dvec *ineq_adj = config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]); blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0); } for (int i = 0; i <= N; i++) { // TODO(rien) where should the update happen??? move to qp update ??? // TODO(all): fix and move where appropriate // if (i<N) // { // ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i]; // sim_opts *opts = dynamics_opts->sim_solver; // if (opts->scheme != NULL && opts->scheme->type != exact) // { // for (int_t j = 0; j < nx; j++) // BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j]; // for (int_t j = 0; j < nu; j++) // BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j]; // } // } } } // update QP rhs for SQP (step prim var, abs dual var) // TODO(all): move in dynamics, cost, constraints modules ??? 
/* Copies the already-evaluated stage-wise gradients/residuals from NLP memory
 * into the QP subproblem right-hand-side vectors (rqz, b, d).
 * Must be called after the matrices/vectors in mem were evaluated. */
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
    ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (int i = 0; i <= N; i++)
    {
        // g: cost gradient -> QP gradient rqz
        blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);

        // b: dynamics residual -> QP equality rhs (stages 0..N-1 only)
        if (i < N)
            blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);

        // d: inequality residual -> QP constraint rhs
        blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
    }
}


/* Re-embeds the initial value: updates the stage-0 bounds via the constraints
 * module, then refreshes the cached stage-0 inequality residual and the
 * corresponding QP rhs vector d. */
void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
    ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int *ni = dims->ni;

    // constraints: stage-0 bounds update only
    config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0],
            in->constraints[0], opts->constraints[0], mem->constraints[0], work->constraints[0]);

    // nlp mem: ineq_fun
    struct blasfeo_dvec *ineq_fun =
            config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]);
    blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0);

    // d
    blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0);
}


/* Computes the directional derivative of the merit function at iterate `out`
 * in the direction of the QP step (mem->qp_out->ux), using already evaluated
 * gradients of the submodules and the weights in work->weight_merit_fun. */
double ocp_nlp_compute_merit_gradient(ocp_nlp_config *config, ocp_nlp_dims *dims,
    ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem,
    ocp_nlp_workspace *work)
{
    /* computes merit function gradient at iterate: out -- using already evaluated gradients of submodules
       with weights: work->weight_merit_fun */
    int i, j;
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    double merit_grad = 0.0;
    double weight;

    // NOTE: step is in: mem->qp_out->ux
    struct blasfeo_dvec *tmp_vec;  // size nv
    struct blasfeo_dvec tmp_vec_nxu = work->tmp_nxu;  // size nxu (struct copied by value)
    struct blasfeo_dvec dxnext_dy = work->dxnext_dy;  // size nx (struct copied by value)

    /* cost contribution: sum_i <grad_cost_i, step_i> */
    for (i=0; i<=N; i++)
    {
        tmp_vec = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
        merit_grad += blasfeo_ddot(nv[i], tmp_vec, 0, mem->qp_out->ux + i, 0);
    }
    double merit_grad_cost = merit_grad;

    /* dynamics */
    double merit_grad_dyn = 0.0;
    for (i=0; i<N; i++)
    {
        // get shooting node gap x_next(x_n, u_n) - x_{n+1};
        tmp_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);

        /* compute directional derivative of xnext with direction y -> dxnext_dy */
        blasfeo_dgemv_t(nx[i]+nu[i], nx[i+1], 1.0, mem->qp_in->BAbt+i, 0, 0,
                mem->qp_out->ux+i, 0, 0.0, &dxnext_dy, 0, &dxnext_dy, 0);

        /* add merit gradient contributions depending on sign of shooting gap */
        for (j = 0; j < nx[i+1]; j++)
        {
            weight = BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j);
            // d/dy of the j-th equality residual along the step direction
            double deqj_dy = BLASFEO_DVECEL(&dxnext_dy, j)
                    - BLASFEO_DVECEL(mem->qp_out->ux+(i+1), nu[i+1]+j);
            {
                // sign of the gap selects the subgradient of |gap|
                if (BLASFEO_DVECEL(tmp_vec, j) > 0)
                {
                    merit_grad_dyn += weight * deqj_dy;
                    // printf("\ndyn_contribution +%e, weight %e, deqj_dy %e, i %d, j %d", weight * deqj_dy, weight, deqj_dy, i, j);
                }
                else
                {
                    merit_grad_dyn -= weight * deqj_dy;
                    // printf("\ndyn_contribution %e, weight %e, deqj_dy %e, i %d, j %d", -weight * deqj_dy, weight, deqj_dy, i, j);
                }
            }
        }
    }

    /* inequality contributions */
    // NOTE: slack bound inequalities are not considered here.
    // They should never be infeasible. Only if explicitly initialized infeasible from outside.
    int constr_index, slack_index_in_ux, slack_index;
    ocp_qp_dims* qp_dims = mem->qp_in->dim;
    int *nb = qp_dims->nb;
    int *ng = qp_dims->ng;
    int *ns = qp_dims->ns;

    double merit_grad_ineq = 0.0;
    double slack_step;

    for (i=0; i<=N; i++)
    {
        tmp_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        int *idxb = mem->qp_in->idxb[i];
        if (ni[i] > 0)
        {
            // NOTE: loop could be simplified handling lower and upper constraints together.
            for (j = 0; j < 2 * (nb[i] + ng[i]); j++) // 2 * ni
            {
                // residual > 0 means the inequality is violated and contributes to the merit fn
                double constraint_val = BLASFEO_DVECEL(tmp_vec, j);
                if (constraint_val > 0)
                {
                    weight = BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j);

                    // find corresponding slack value
                    constr_index = j < nb[i]+ng[i] ? j : j-(nb[i]+ng[i]);
                    slack_index = mem->qp_in->idxs_rev[i][constr_index];
                    // if softened: add slack contribution
                    if (slack_index >= 0)
                    {
                        // lower slacks come first in ux, upper slacks are offset by ns[i]
                        slack_index_in_ux = j < (nb[i]+ng[i]) ?
                                nx[i] + nu[i] + slack_index :
                                nx[i] + nu[i] + slack_index + ns[i];
                        slack_step = BLASFEO_DVECEL(mem->qp_out->ux+i, slack_index_in_ux);
                        merit_grad_ineq -= weight * slack_step;
                        // printf("at node %d, ineq %d, idxs_rev[%d] = %d\n", i, j, constr_index, slack_index);
                        // printf("slack contribution: uxs[%d] = %e\n", slack_index_in_ux, slack_step);
                    }

                    // NOTE: the inequalities are internally organized in the following order:
                    // [ lbu lbx lg lh lphi ubu ubx ug uh uphi;
                    //   lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]
                    // printf("constraint %d %d is active with value %e", i, j, constraint_val);
                    if (j < nb[i])
                    {
                        // lower bound on a single ux entry
                        // printf("lower idxb[%d] = %d dir %f, constraint_val %f, nb = %d\n", j, idxb[j], BLASFEO_DVECEL(mem->qp_out->ux, idxb[j]), constraint_val, nb[i]);
                        merit_grad_ineq += weight * BLASFEO_DVECEL(mem->qp_out->ux+i, idxb[j]);
                    }
                    else if (j < nb[i] + ng[i])
                    {
                        // lower general linear constraint:
                        // merit_grad_ineq += weight * mem->qp_in->DCt_j * dux
                        blasfeo_dcolex(nx[i] + nu[i], mem->qp_in->DCt+i, j - nb[i], 0,
                                &tmp_vec_nxu, 0);
                        merit_grad_ineq += weight * blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0,
                                mem->qp_out->ux+i, 0);
                        // printf("general linear constraint lower contribution = %e, val = %e\n", blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0), constraint_val);
                    }
                    else if (j < 2*nb[i] + ng[i])
                    {
                        // upper bound on a single ux entry
                        // printf("upper idxb[%d] = %d dir %f, constraint_val %f, nb = %d\n", j-nb[i]-ng[i], idxb[j-nb[i]-ng[i]], BLASFEO_DVECEL(mem->qp_out->ux, idxb[j-nb[i]-ng[i]]), constraint_val, nb[i]);
                        merit_grad_ineq += weight *
                                BLASFEO_DVECEL(mem->qp_out->ux+i, idxb[j-nb[i]-ng[i]]);
                    }
                    else if (j < 2*nb[i] + 2*ng[i])
                    {
                        // upper general linear constraint
                        blasfeo_dcolex(nx[i] + nu[i], mem->qp_in->DCt+i, j - 2*nb[i] - ng[i], 0,
                                &tmp_vec_nxu, 0);
                        merit_grad_ineq += weight * blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0,
                                mem->qp_out->ux+i, 0);
                        // printf("general linear constraint upper contribution = %e, val = %e\n", blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0), constraint_val);
                    }
                }
            }
        }
    }

    // print_ocp_qp_dims(qp_dims);
    // print_ocp_qp_in(mem->qp_in);

    merit_grad = merit_grad_cost + merit_grad_dyn + merit_grad_ineq;
    if (opts->print_level > 1)
        printf("computed merit_grad = %e, merit_grad_cost = %e, merit_grad_dyn = %e, merit_grad_ineq = %e\n",
                merit_grad, merit_grad_cost, merit_grad_dyn, merit_grad_ineq);

    return merit_grad;
}


/* Returns the infinity norm of the constraint violation (dynamics gaps and
 * inequality residuals) at the current iterate. */
static double ocp_nlp_get_violation(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
    ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    // computes constraint violation infinity norm
    // assumes constraint functions are evaluated before, e.g. done in ocp_nlp_evaluate_merit_fun
    int i, j;
    int N = dims->N;
    int *nx = dims->nx;
    int *ni = dims->ni;

    struct blasfeo_dvec *tmp_fun_vec;

    double violation = 0.0;
    double tmp;
    // dynamics gaps: any nonzero entry is a violation
    for (i=0; i<N; i++)
    {
        tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
        for (j=0; j<nx[i+1]; j++)
        {
            tmp = fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
            violation = tmp > violation ? tmp : violation;
        }
    }

    for (i=0; i<=N; i++)
    {
        tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        for (j=0; j<2*ni[i]; j++)
        {
            // Note constraint violation corresponds to > 0
            tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
            violation = tmp > violation ?
tmp : violation; } } return violation; } double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { /* computes merit function value at iterate: tmp_nlp_out, with weights: work->weight_merit_fun */ //int j; int N = dims->N; int *nx = dims->nx; int *ni = dims->ni; double merit_fun = 0.0; // compute fun value #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i=0; i<=N; i++) { // cost config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); } #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i=0; i<N; i++) { // dynamics config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]); } #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i=0; i<=N; i++) { // constr config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); } double *tmp_fun; double tmp; struct blasfeo_dvec *tmp_fun_vec; double cost_fun = 0.0; for(int i=0; i<=N; i++) { tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]); cost_fun += *tmp_fun; } double dyn_fun = 0.0; for(int i=0; i<N; i++) { tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]); // printf("\nMerit: dyn will multiply tmp_fun, weights %d\n", i); // blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0); // blasfeo_print_exp_tran_dvec(nx[i+1], work->weight_merit_fun->pi+i, 0); for(int j=0; j<nx[i+1]; j++) { // printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j))); dyn_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j)); } } double constr_fun = 0.0; for(int i=0; i<=N; i++) { // 
printf("\ni %d\n", i); tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); // blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0); // blasfeo_print_exp_tran_dvec(2*ni[i], work->weight_merit_fun->lam+i, 0); for (int j=0; j<2*ni[i]; j++) { tmp = BLASFEO_DVECEL(tmp_fun_vec, j); if (tmp > 0.0) { // tmp = constraint violation // printf("IN merit fun: ineq i %d, j %d tmp_fun %e, multiplier %e\n", i, j, tmp, BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)); constr_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)) * tmp; } } } merit_fun = cost_fun + dyn_fun + constr_fun; // printf("Merit fun: %e cost: %e dyn: %e constr: %e\n", merit_fun, cost_fun, dyn_fun, constr_fun); return merit_fun; } double ocp_nlp_line_search(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work, int check_early_termination) { int i, j; int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *ni = dims->ni; double alpha = opts->step_length; double tmp0, tmp1, merit_fun1; ocp_qp_out *qp_out = mem->qp_out; // Line search version Jonathan // Following Leineweber1999, Section "3.5.1 Line Search Globalization" // TODO: check out more advanced step search Leineweber1995 if (opts->globalization == MERIT_BACKTRACKING) { // copy out (current iterate) to work->tmp_nlp_out for (i = 0; i <= N; i++) blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); // for (i = 0; i < N; i++) // blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0); // for (i = 0; i <= N; i++) // blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0); // linear update of algebraic variables using state and input sensitivity // if (i < N) // { // blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0); // } /* modify/initialize merit function weights (Leineweber1999 M5.1, p.89) */ if (mem->sqp_iter[0]==0) { // 
initialize weights // equality merit weights = abs( eq multipliers of qp_sol ) for (i = 0; i < N; i++) { for (j=0; j<nx[i+1]; j++) { // tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j)); tmp0 = fabs(BLASFEO_DVECEL(qp_out->pi+i, j)); } } for (i = 0; i <= N; i++) { blasfeo_dveccp(2*ni[i], qp_out->lam+i, 0, work->weight_merit_fun->lam+i, 0); } } else { // update weights // printf("merit fun: update weights, sqp_iter = %d\n", mem->sqp_iter[0]); for (i = 0; i < N; i++) { for(j=0; j<nx[i+1]; j++) { // abs(lambda) (LW) tmp0 = fabs(BLASFEO_DVECEL(qp_out->pi+i, j)); // .5 * (abs(lambda) + sigma) tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)); BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0 > tmp1 ? tmp0 : tmp1; } } for (i = 0; i <= N; i++) { for(j=0; j<2*ni[i]; j++) { // mu (LW) tmp0 = BLASFEO_DVECEL(qp_out->lam+i, j); // .5 * (mu + tau) tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)); BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1; } } } if (1) // (mem->sqp_iter[0]!=0) // TODO: why does Leineweber do full step in first SQP iter? { double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work); double reduction_factor = opts->alpha_reduction; double max_next_merit_fun_val = merit_fun0; double eps_sufficient_descent = opts->eps_sufficient_descent; double dmerit_dy = 0.0; alpha = 1.0; // to avoid armijo evaluation and loop when checking if SOC should be done if (check_early_termination) { // TMP: // printf("tmp: merit_grad eval in early termination\n"); // dmerit_dy = ocp_nlp_compute_merit_gradient(config, dims, in, out, opts, mem, work); // TODO(oj): should the merit weight update be undone in case of early termination? 
double violation_current = ocp_nlp_get_violation(config, dims, in, out, opts, mem, work); // tmp_nlp_out = out + alpha * qp_out for (i = 0; i <= N; i++) blasfeo_daxpy(nv[i], alpha, qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work); double violation_step = ocp_nlp_get_violation(config, dims, in, out, opts, mem, work); if (opts->print_level > 0) { printf("\npreliminary line_search: merit0 %e, merit1 %e; viol_current %e, viol_step %e\n", merit_fun0, merit_fun1, violation_current, violation_step); } if (merit_fun1 < merit_fun0 && violation_step < violation_current) { // TODO: check armijo in this case? return alpha; } else { return reduction_factor * reduction_factor; } } /* actual Line Search*/ if (opts->line_search_use_sufficient_descent) { // check Armijo-type sufficient descent condition Leinweber1999 (2.35); dmerit_dy = ocp_nlp_compute_merit_gradient(config, dims, in, out, opts, mem, work); if (dmerit_dy > 0.0) { if (dmerit_dy > 1e-6 && opts->print_level > 0) { printf("\nacados line search: found dmerit_dy = %e > 0. Setting it to 0.0 instead\n", dmerit_dy); } dmerit_dy = 0.0; } } // From Leineweber1999: eq (3.64) -> only relevant for adaptive integrators looking at Remark 3.2. // "It is noteworthy that our practical implementation takes into account the potential nonsmoothness introduced by the fact that certain components of the penalty function - namely the continuity condition residuals - are evaluated only within integration tolerance." 
// double sum_pi = 0.0; // for (i = 0; i < N; i++) // { // for (j = 0; j < dims->nx[i+1]; j++) // sum_pi += BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j); // } // double relaxed_val = 2.0 * 1e-6 * sum_pi; // if (abs(merit_fun0 - merit_fun1) < relaxed_val) // { // printf("\nexiting because of relaxed_val."); // break; // } for (j=0; alpha*reduction_factor > opts->alpha_min; j++) { // tmp_nlp_out = out + alpha * qp_out for (i = 0; i <= N; i++) blasfeo_daxpy(nv[i], alpha, qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work); if (opts->print_level > 1) { printf("backtracking %d, merit_fun1 = %e, merit_fun0 %e\n", j, merit_fun1, merit_fun0); } // if (merit_fun1 < merit_fun0 && merit_fun1 > max_next_merit_fun_val) // { // printf("\nalpha %f would be accepted without sufficient descent condition", alpha); // } max_next_merit_fun_val = merit_fun0 + eps_sufficient_descent * dmerit_dy * alpha; if (merit_fun1 < max_next_merit_fun_val) { break; } else { alpha *= reduction_factor; } } } } return alpha; } void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work, double alpha) { int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; int *nz = dims->nz; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // step in primal variables blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0); // update dual variables if (opts->full_step_dual) { blasfeo_dveccp(2*ni[i], mem->qp_out->lam+i, 0, out->lam+i, 0); if (i < N) { blasfeo_dveccp(nx[i+1], mem->qp_out->pi+i, 0, out->pi+i, 0); } } else { // update duals with alpha step blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0); blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0); if (i < N) { 
                blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0);
                blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0);
            }
        }

        // update slack values (always convex combination with factor alpha)
        blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0);
        blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0);

        // linear update of algebraic variables using state and input sensitivity
        if (i < N)
        {
            blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0,
                    mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
        }
    }
}


/************************************************
 * residuals
 ************************************************/

/* Returns the number of bytes needed for an ocp_nlp_res with the given dims.
 * Must stay in sync with the assignments in ocp_nlp_res_assign. */
acados_size_t ocp_nlp_res_calculate_size(ocp_nlp_dims *dims)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    acados_size_t size = sizeof(ocp_nlp_res);

    size += 3 * (N + 1) * sizeof(struct blasfeo_dvec);  // res_stat res_ineq res_comp
    size += 1 * N * sizeof(struct blasfeo_dvec);        // res_eq

    for (int i = 0; i < N; i++)
    {
        size += 1 * blasfeo_memsize_dvec(nv[i]);      // res_stat
        size += 1 * blasfeo_memsize_dvec(nx[i + 1]);  // res_eq
        size += 2 * blasfeo_memsize_dvec(2 * ni[i]);  // res_ineq res_comp
    }
    size += 1 * blasfeo_memsize_dvec(nv[N]);      // res_stat
    size += 2 * blasfeo_memsize_dvec(2 * ni[N]);  // res_ineq res_comp

    size += 8;   // initial align
    size += 8;   // blasfeo_struct align
    size += 64;  // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}


/* Carves an ocp_nlp_res structure plus its BLASFEO vectors out of raw_memory.
 * raw_memory must hold at least ocp_nlp_res_calculate_size(dims) bytes. */
ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_res *res = (ocp_nlp_res *) c_ptr;
    c_ptr += sizeof(ocp_nlp_res);

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // res_stat
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_stat, &c_ptr);
    // res_eq
    assign_and_advance_blasfeo_dvec_structs(N, &res->res_eq, &c_ptr);
    // res_ineq
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_ineq, &c_ptr);
    // res_comp
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_comp, &c_ptr);

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // res_stat
    for (int i = 0; i <= N; i++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[i], res->res_stat + i, &c_ptr);
    }
    // res_eq
    for (int i = 0; i < N; i++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[i + 1], res->res_eq + i, &c_ptr);
    }
    // res_ineq
    for (int i = 0; i <= N; i++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[i], res->res_ineq + i, &c_ptr);
    }
    // res_comp
    for (int i = 0; i <= N; i++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[i], res->res_comp + i, &c_ptr);
    }

    res->memsize = ocp_nlp_res_calculate_size(dims);

    return res;
}


/* Computes the KKT residual vectors (stationarity, equality, inequality,
 * complementarity) and their infinity norms from the evaluated NLP memory. */
void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res,
                         ocp_nlp_memory *mem)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    double tmp_res;

    // res_stat = cost_grad - ineq_adj - dyn_adj
    res->inf_norm_res_stat = 0.0;
    for (int i = 0; i <= N; i++)
    {
        blasfeo_daxpy(nv[i], -1.0, mem->ineq_adj + i, 0, mem->cost_grad + i, 0,
                res->res_stat + i, 0);
        blasfeo_daxpy(nu[i] + nx[i], -1.0, mem->dyn_adj + i, 0, res->res_stat + i, 0,
                res->res_stat + i, 0);
        blasfeo_dvecnrm_inf(nv[i], res->res_stat + i, 0, &tmp_res);
        res->inf_norm_res_stat = tmp_res > res->inf_norm_res_stat ? tmp_res : res->inf_norm_res_stat;
    }

    // res_eq = dynamics gap
    res->inf_norm_res_eq = 0.0;
    for (int i = 0; i < N; i++)
    {
        blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, res->res_eq + i, 0);
        blasfeo_dvecnrm_inf(nx[i + 1], res->res_eq + i, 0, &tmp_res);
        res->inf_norm_res_eq = tmp_res > res->inf_norm_res_eq ? tmp_res : res->inf_norm_res_eq;
    }

    // res_ineq = ineq_fun + t (slacked inequality residual)
    res->inf_norm_res_ineq = 0.0;
    for (int i = 0; i <= N; i++)
    {
        blasfeo_daxpy(2 * ni[i], 1.0, out->t + i, 0, mem->ineq_fun + i, 0,
                res->res_ineq + i, 0);
        blasfeo_dvecnrm_inf(2 * ni[i], res->res_ineq + i, 0, &tmp_res);
        res->inf_norm_res_ineq = tmp_res > res->inf_norm_res_ineq ? tmp_res : res->inf_norm_res_ineq;
    }

    // res_comp = lam .* t (elementwise complementarity)
    res->inf_norm_res_comp = 0.0;
    for (int i = 0; i <= N; i++)
    {
        blasfeo_dvecmul(2 * ni[i], out->lam + i, 0, out->t + i, 0, res->res_comp + i, 0);
        blasfeo_dvecnrm_inf(2 * ni[i], res->res_comp + i, 0, &tmp_res);
        res->inf_norm_res_comp = tmp_res > res->inf_norm_res_comp ? tmp_res : res->inf_norm_res_comp;
    }
    // printf("computed residuals stat: %e, eq: %e, ineq: %e, comp: %e\n", res->inf_norm_res_stat, res->inf_norm_res_eq,
    //        res->inf_norm_res_ineq, res->inf_norm_res_comp);
}


/* Writes the maximum of the four residual infinity norms to *out. */
void ocp_nlp_res_get_inf_norm(ocp_nlp_res *res, double *out)
{
    double norm = res->inf_norm_res_stat;
    norm = (res->inf_norm_res_eq > norm) ? res->inf_norm_res_eq : norm;
    norm = (res->inf_norm_res_ineq > norm) ? res->inf_norm_res_ineq : norm;
    norm = (res->inf_norm_res_comp > norm) ? res->inf_norm_res_comp : norm;
    *out = norm;
    return;
}


/* Evaluates the total cost at the iterate out->ux and stores it in
 * mem->cost_value. */
void ocp_nlp_cost_compute(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
    ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    // extract dims
    int N = dims->N;
    double* tmp_cost = NULL;
    double total_cost = 0.0;

    for (int i = 0; i <= N; i++)
    {
        // set pointers
        // NOTE(oj): the cost compute function takes the tmp_ux_ptr as input,
        // since it is also used for globalization,
        // especially with primal variables that are NOT current SQP iterates.
        config->cost[i]->memory_set_tmp_ux_ptr(out->ux+i, mem->cost[i]);
        config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i],
                opts->cost[i], mem->cost[i], work->cost[i]);
        tmp_cost = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
        // printf("cost at stage %d = %e, total = %e\n", i, *tmp_cost, total_cost);
        total_cost += *tmp_cost;
    }
    mem->cost_value = total_cost;
    // printf("\ncomputed total cost: %e\n", total_cost);
}
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) { for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(8*t3+Nx+4,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),64*t4+62),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef 
LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
zposv.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_posv
 *
 *  Computes the solution to a system of linear equations A * X = B,
 *  where A is an n-by-n Hermitian positive definite matrix and X and B are
 *  n-by-nrhs matrices. The Cholesky decomposition is used to factor A as
 *
 *    \f[ A = L\times L^H, \f] if uplo = PlasmaLower,
 *    or
 *    \f[ A = U^H\times U, \f] if uplo = PlasmaUpper,
 *
 *  where U is an upper triangular matrix and L is a lower triangular matrix.
 *  The factored form of A is then used to solve the system of equations:
 *
 *    A * X = B.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The number of linear equations, i.e., the order of the matrix A.
 *          n >= 0.
 *
 * @param[in] nrhs
 *          The number of right hand sides, i.e., the number of columns
 *          of the matrix B. nrhs >= 0.
 *
 * @param[in,out] pA
 *          On entry, the Hermitian positive definite matrix A.
 *          If uplo = PlasmaUpper, the leading n-by-n upper triangular part of A
 *          contains the upper triangular part of the matrix A, and the strictly
 *          lower triangular part of A is not referenced.
 *          If UPLO = 'L', the leading n-by-n lower triangular part of A
 *          contains the lower triangular part of the matrix A, and the strictly
 *          upper triangular part of A is not referenced.
 *          On exit, if return value = 0, the factor U or L from
 *          the Cholesky factorization A = U^H*U or A = L*L^H.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,n).
 *
 * @param[in,out] pB
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, the leading minor of order i of A is not
 *         positive definite, so the factorization could not
 *         be completed, and the solution has not been computed.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zposv
 * @sa plasma_cposv
 * @sa plasma_dposv
 * @sa plasma_sposv
 *
 ******************************************************************************/
int plasma_zposv(plasma_enum_t uplo,
                 int n, int nrhs,
                 plasma_complex64_t *pA, int lda,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -3;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_potrf(plasma, PlasmaComplexDouble, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_triangular_create(PlasmaComplexDouble, uplo, nb, nb,
                                           n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        // free A so the early return does not leak the first descriptor
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    // NOTE(review): retval of plasma_sequence_init/plasma_request_init is
    // overwritten without being checked — confirm these cannot fail here.
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_ztr2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zposv(uplo, A, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2tr(A, pA, lda, &sequence, &request);
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_posv
 *
 *  Solves a Hermitian positive definite system of linear equations
 *  using Cholesky factorization.
 *  Non-blocking tile version of plasma_zposv().
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in,out] A
 *          On entry, the Hermitian positive definite matrix A.
* If uplo = PlasmaUpper, the leading n-by-n upper triangular part of A * contains the upper triangular part of the matrix A, and the strictly * lower triangular part of A is not referenced. * If UPLO = 'L', the leading n-by-n lower triangular part of A * contains the lower triangular part of the matrix A, and the strictly * upper triangular part of A is not referenced. * On exit, if return value = 0, the factor U or L from * the Cholesky factorization A = U^H*U or A = L*L^H. * * @param[in,out] B * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zposv * @sa plasma_omp_cposv * @sa plasma_omp_dposv * @sa plasma_omp_sposv * ******************************************************************************/ void plasma_omp_zposv(plasma_enum_t uplo, plasma_desc_t A, plasma_desc_t B, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. 
if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.n == 0 || B.n == 0) return; // Call the parallel functions. plasma_pzpotrf(uplo, A, sequence, request); plasma_enum_t trans; trans = uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans; plasma_pztrsm(PlasmaLeft, uplo, trans, PlasmaNonUnit, 1.0, A, B, sequence, request); trans = uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans; plasma_pztrsm(PlasmaLeft, uplo, trans, PlasmaNonUnit, 1.0, A, B, sequence, request); }
/* ==================== fill_ints.c (PySCF) ==================== */
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <complex.h> #include <assert.h> #include "config.h" #include "cint.h" #include "vhf/fblas.h" #include "pbc/optimizer.h" #include "np_helper/np_helper.h" #define INTBUFMAX 1000 #define INTBUFMAX10 8000 #define IMGBLK 80 #define OF_CMPLX 2 int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); static int shloc_partition(int *kshloc, int *ao_loc, int ksh0, int ksh1, int dkmax) { int ksh; int nloc = 0; int loclast = ao_loc[ksh0]; kshloc[0] = ksh0; for (ksh = ksh0+1; ksh < ksh1; ksh++) { assert(ao_loc[ksh+1] - ao_loc[ksh] < dkmax); if (ao_loc[ksh+1] - loclast > dkmax) { nloc += 1; kshloc[nloc] = ksh; loclast = ao_loc[ksh]; } } nloc += 1; kshloc[nloc] = ksh1; return nloc; } static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL) { env_loc[ptr+0] = env[ptr+0] + Ls[iL*3+0]; env_loc[ptr+1] = env[ptr+1] + Ls[iL*3+1]; env_loc[ptr+2] = env[ptr+2] + Ls[iL*3+2]; } static void sort3c_kks1(double complex *out, double *bufr, double *bufi, int *kptij_idx, int *shls_slice, int *ao_loc, int nkpts, int nkpts_ij, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = 
shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; out += (ip * naoj + jp) * naok; int i, j, k, kk, ik, jk, ksh, ic, dk, dijk; size_t off; double *pbr, *pbi; double complex *pout; for (kk = 0; kk < nkpts_ij; kk++) { ik = kptij_idx[kk] / nkpts; jk = kptij_idx[kk] % nkpts; off = (ik*nkpts+jk) * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbr = bufr + off + dijk*ic; pbi = bufi + off + dijk*ic; for (j = 0; j < dj; j++) { for (k = 0; k < dk; k++) { for (i = 0; i < di; i++) { pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I; } } pout += naok; pbr += di; pbi += di; } } off += dijk * comp; } out += nijk * comp; } } static void _nr3c_fill_kk(int (*intor)(), void (*fsort)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const char TRANS_N = 'N'; const double D0 = 0; const double D1 = 1; const double ND1 = -1; jsh += jsh0; ish += ish0; int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS]; int jptrxyz = 
atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS]; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; int dkmax = INTBUFMAX / dij; int kshloc[ksh1-ksh0+1]; int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax); int i, m, msh0, msh1, dijm, dijmc, dijmk, empty; int ksh, dk, iL0, iL, jL, iLcount; int shls[3]; double *bufkk_r, *bufkk_i, *bufkL_r, *bufkL_i, *bufL, *pbuf, *cache; int (*fprescreen)(); if (pbcopt != NULL) { fprescreen = pbcopt->fprescreen; } else { fprescreen = PBCnoscreen; } shls[0] = ish; shls[1] = jsh; for (m = 0; m < nkshloc; m++) { msh0 = kshloc[m]; msh1 = kshloc[m+1]; dkmax = ao_loc[msh1] - ao_loc[msh0]; dijm = dij * dkmax; dijmc = dijm * comp; dijmk = dijmc * nkpts; bufkk_r = buf; bufkk_i = bufkk_r + (size_t)nkpts * dijmk; bufkL_r = bufkk_i + (size_t)nkpts * dijmk; bufkL_i = bufkL_r + (size_t)MIN(nimgs,IMGBLK) * dijmk; bufL = bufkL_i + (size_t)MIN(nimgs,IMGBLK) * dijmk; cache = bufL + (size_t)nimgs * dijmc; for (i = 0; i < nkpts*dijmk*OF_CMPLX; i++) { bufkk_r[i] = 0; } for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) { iLcount = MIN(IMGBLK, nimgs - iL0); for (iL = iL0; iL < iL0+iLcount; iL++) { shift_bas(env_loc, env, Ls, iptrxyz, iL); pbuf = bufL; for (jL = 0; jL < nimgs; jL++) { shift_bas(env_loc, env, Ls, jptrxyz, jL); if ((*fprescreen)(shls, pbcopt, atm, bas, env_loc)) { for (ksh = msh0; ksh < msh1; ksh++) { shls[2] = ksh; if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas, env_loc, cintopt, cache)) { empty = 0; } dk = ao_loc[ksh+1] - ao_loc[ksh]; pbuf += dij*dk * comp; } } else { for (i = 0; i < dijmc; i++) { pbuf[i] = 0; } pbuf += dijmc; } } dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &nimgs, &D1, bufL, &dijmc, expkL_r, &nimgs, &D0, bufkL_r+(iL-iL0)*(size_t)dijmk, &dijmc); dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &nimgs, &D1, bufL, &dijmc, expkL_i, &nimgs, &D0, bufkL_i+(iL-iL0)*(size_t)dijmk, &dijmc); } // iL in range(0, nimgs) // conj(exp(1j*dot(h,k))) dgemm_(&TRANS_N, 
&TRANS_N, &dijmk, &nkpts, &iLcount, &D1, bufkL_r, &dijmk, expkL_r+iL0, &nimgs, &D1, bufkk_r, &dijmk); dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount, &D1, bufkL_i, &dijmk, expkL_i+iL0, &nimgs, &D1, bufkk_r, &dijmk); dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount, &D1, bufkL_i, &dijmk, expkL_r+iL0, &nimgs, &D1, bufkk_i, &dijmk); dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount, &ND1, bufkL_r, &dijmk, expkL_i+iL0, &nimgs, &D1, bufkk_i, &dijmk); } (*fsort)(out, bufkk_r, bufkk_i, kptij_idx, shls_slice, ao_loc, nkpts, nkpts_ij, comp, ish, jsh, msh0, msh1); } } /* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL) */ void PBCnr3c_fill_kks1(int (*intor)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { _nr3c_fill_kk(intor, &sort3c_kks1, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } static void sort3c_kks2_igtj(double complex *out, double *bufr, double *bufi, int *kptij_idx, int *shls_slice, int *ao_loc, int nkpts, int nkpts_ij, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; assert(naoi == naoj); const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = 
ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; double complex *outij = out + (ip * naoj + jp) * naok; double complex *outji = out + (jp * naoj + ip) * naok; int i, j, k, kk, ik, jk, ksh, ic, dk, dijk; size_t offij, offji; double *pbij_r, *pbij_i, *pbji_r, *pbji_i; double complex *poutij, *poutji; for (kk = 0; kk < nkpts_ij; kk++) { ik = kptij_idx[kk] / nkpts; jk = kptij_idx[kk] % nkpts; offij = (ik*nkpts+jk) * dijmc; offji = (jk*nkpts+ik) * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { poutij = outij + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; poutji = outji + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbij_r = bufr + offij + dijk*ic; pbij_i = bufi + offij + dijk*ic; pbji_r = bufr + offji + dijk*ic; pbji_i = bufi + offji + dijk*ic; for (j = 0; j < dj; j++) { for (k = 0; k < dk; k++) { for (i = 0; i < di; i++) { poutij[i*njk +k] = pbij_r[k*dij+i] + pbij_i[k*dij+i]*_Complex_I; poutji[i*naok+k] = pbji_r[k*dij+i] - pbji_i[k*dij+i]*_Complex_I; } } poutij += naok; poutji += njk; pbij_r += di; pbij_i += di; pbji_r += di; pbji_i += di; } } offij += dijk * comp; offji += dijk * comp; } outij += nijk * comp; outji += nijk * comp; } } /* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL) */ void PBCnr3c_fill_kks2(int (*intor)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { int ip = ish + shls_slice[0]; int jp = jsh + shls_slice[2] - nbas; if (ip > jp) { _nr3c_fill_kk(intor, &sort3c_kks2_igtj, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } else if (ip == jp) { _nr3c_fill_kk(intor, &sort3c_kks1, out, nkpts_ij, nkpts, comp, nimgs, ish, 
jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } } static void sort3c_ks1(double complex *out, double *bufr, double *bufi, int *shls_slice, int *ao_loc, int nkpts, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; out += (ip * naoj + jp) * naok; int i, j, k, kk, ksh, ic, dk, dijk; size_t off; double *pbr, *pbi; double complex *pout; for (kk = 0; kk < nkpts; kk++) { off = kk * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbr = bufr + off + dijk*ic; pbi = bufi + off + dijk*ic; for (j = 0; j < dj; j++) { for (k = 0; k < dk; k++) { for (i = 0; i < di; i++) { pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I; } } pout += naok; pbr += di; pbi += di; } } off += dijk * comp; } out += nijk * comp; } } /* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */ static void _nr3c_fill_k(int (*intor)(), void (*fsort)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double 
*env) { const int ish0 = shls_slice[0]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const char TRANS_N = 'N'; const double D1 = 1; jsh += jsh0; ish += ish0; int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS]; int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS]; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; int dkmax = INTBUFMAX10 / dij; int kshloc[ksh1-ksh0+1]; int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax); int i, m, msh0, msh1, dijmc, empty; size_t dijmk; int ksh, dk, iL, jL, jLcount; int shls[3]; double *bufexp_r = buf; double *bufexp_i = bufexp_r + nimgs * nkpts; double *bufk_r = bufexp_i + nimgs * nkpts; double *bufk_i, *bufL, *pbuf, *cache; int (*fprescreen)(); if (pbcopt != NULL) { fprescreen = pbcopt->fprescreen; } else { fprescreen = PBCnoscreen; } shls[0] = ish; shls[1] = jsh; for (m = 0; m < nkshloc; m++) { msh0 = kshloc[m]; msh1 = kshloc[m+1]; dkmax = ao_loc[msh1] - ao_loc[msh0]; dijmc = dij * dkmax * comp; dijmk = dijmc * nkpts; bufk_i = bufk_r + dijmk; bufL = bufk_i + dijmk; cache = bufL + nimgs * dijmc; for (i = 0; i < dijmk*OF_CMPLX; i++) { bufk_r[i] = 0; } for (iL = 0; iL < nimgs; iL++) { shift_bas(env_loc, env, Ls, iptrxyz, iL); pbuf = bufL; jLcount = 0; for (jL = 0; jL < nimgs; jL++) { shift_bas(env_loc, env, Ls, jptrxyz, jL); if ((*fprescreen)(shls, pbcopt, atm, bas, env_loc)) { for (ksh = msh0; ksh < msh1; ksh++) { shls[2] = ksh; if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas, env_loc, cintopt, cache)) { empty = 0; } dk = ao_loc[ksh+1] - ao_loc[ksh]; pbuf += dij*dk * comp; } // ('k,kL->kL', conj(expkL[iL]), expkL) for (i = 0; i < nkpts; i++) { bufexp_r[i*nimgs+jLcount] = expkL_r[i*nimgs+jL] * expkL_r[i*nimgs+iL]; bufexp_r[i*nimgs+jLcount]+= expkL_i[i*nimgs+jL] * expkL_i[i*nimgs+iL]; bufexp_i[i*nimgs+jLcount] = expkL_i[i*nimgs+jL] * expkL_r[i*nimgs+iL]; 
bufexp_i[i*nimgs+jLcount]-= expkL_r[i*nimgs+jL] * expkL_i[i*nimgs+iL]; } jLcount++; } } dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &jLcount, &D1, bufL, &dijmc, bufexp_r, &nimgs, &D1, bufk_r, &dijmc); dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &jLcount, &D1, bufL, &dijmc, bufexp_i, &nimgs, &D1, bufk_i, &dijmc); } // iL in range(0, nimgs) (*fsort)(out, bufk_r, bufk_i, shls_slice, ao_loc, nkpts, comp, ish, jsh, msh0, msh1); } } /* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */ void PBCnr3c_fill_ks1(int (*intor)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { _nr3c_fill_k(intor, sort3c_ks1, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } static void sort3c_ks2_igtj(double complex *out, double *bufr, double *bufi, int *shls_slice, int *ao_loc, int nkpts, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t nijk = nij * naok; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok; int i, j, k, ij, kk, ksh, ic, dk, dijk; size_t off; double *pbr, *pbi; double complex *pout; for (kk = 0; kk < nkpts; kk++) { off = kk * dijmc; for (ksh = msh0; 
ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbr = bufr + off + dijk*ic; pbi = bufi + off + dijk*ic; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { ij = j * di + i; for (k = 0; k < dk; k++) { pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I; } } pout += (i+ao_loc[ish]+1) * naok; } } off += dijk * comp; } out += nijk * comp; } } static void sort3c_ks2_ieqj(double complex *out, double *bufr, double *bufi, int *shls_slice, int *ao_loc, int nkpts, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t nijk = nij * naok; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok; int i, j, k, ij, kk, ksh, ic, dk, dijk; size_t off; double *pbr, *pbi; double complex *pout; for (kk = 0; kk < nkpts; kk++) { off = kk * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbr = bufr + off + dijk*ic; pbi = bufi + off + dijk*ic; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { ij = j * di + i; for (k = 0; k < dk; k++) { pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I; } } pout += (i+ao_loc[ish]+1) * naok; } } off += dijk * comp; } out += nijk * comp; } } /* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */ void PBCnr3c_fill_ks2(int (*intor)(), double 
complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { int ip = ish + shls_slice[0]; int jp = jsh + shls_slice[2] - nbas; if (ip > jp) { _nr3c_fill_k(intor, &sort3c_ks2_igtj, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } else if (ip == jp) { _nr3c_fill_k(intor, &sort3c_ks2_ieqj, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } } static void sort3c_gs1(double *out, double *in, int *shls_slice, int *ao_loc, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; out += (ip * naoj + jp) * naok; int i, j, k, ksh, ic, dk, dijk; double *pin, *pout; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0]; pin = in + dijk * ic; for (j = 0; j < dj; j++) { for (i = 0; i < di; i++) { for (k = 0; k < dk; k++) { pout[i*njk+k] = pin[k*dij+i]; } } pout += naok; pin += di; } } in += dijk * 
comp; } } static void _nr3c_fill_g(int (*intor)(), void (*fsort)(), double *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; jsh += jsh0; ish += ish0; int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS]; int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS]; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; int dkmax = INTBUFMAX10 / dij / 2 * MIN(IMGBLK,nimgs); int kshloc[ksh1-ksh0+1]; int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax); int i, m, msh0, msh1, dijm; int ksh, dk, iL, jL, dijkc; int shls[3]; int dijmc = dij * dkmax * comp; double *bufL = buf + dijmc; double *cache = bufL + dijmc; double *pbuf; int (*fprescreen)(); if (pbcopt != NULL) { fprescreen = pbcopt->fprescreen; } else { fprescreen = PBCnoscreen; } shls[0] = ish; shls[1] = jsh; for (m = 0; m < nkshloc; m++) { msh0 = kshloc[m]; msh1 = kshloc[m+1]; dkmax = ao_loc[msh1] - ao_loc[msh0]; dijm = dij * dkmax; dijmc = dijm * comp; for (i = 0; i < dijmc; i++) { bufL[i] = 0; } for (iL = 0; iL < nimgs; iL++) { shift_bas(env_loc, env, Ls, iptrxyz, iL); for (jL = 0; jL < nimgs; jL++) { shift_bas(env_loc, env, Ls, jptrxyz, jL); if ((*fprescreen)(shls, pbcopt, atm, bas, env_loc)) { pbuf = bufL; for (ksh = msh0; ksh < msh1; ksh++) { shls[2] = ksh; dk = ao_loc[ksh+1] - ao_loc[ksh]; dijkc = dij*dk * comp; if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas, env_loc, cintopt, cache)) { for (i = 0; i < dijkc; i++) { pbuf[i] += buf[i]; } } pbuf += dijkc; } } } } // iL in range(0, nimgs) (*fsort)(out, bufL, shls_slice, ao_loc, comp, ish, jsh, msh0, msh1); } } /* 
('...LM->...', int3c) */ void PBCnr3c_fill_gs1(int (*intor)(), double *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { _nr3c_fill_g(intor, &sort3c_gs1, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } static void sort3c_gs2_igtj(double *out, double *in, int *shls_slice, int *ao_loc, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t nijk = nij * naok; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok; int i, j, k, ij, ksh, ic, dk, dijk; double *pin, *pout; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0]; pin = in + dijk * ic; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { ij = j * di + i; for (k = 0; k < dk; k++) { pout[j*naok+k] = pin[k*dij+ij]; } } pout += (i+ao_loc[ish]+1) * naok; } } in += dijk * comp; } } static void sort3c_gs2_ieqj(double *out, double *in, int *shls_slice, int *ao_loc, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const 
size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t nijk = nij * naok; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dij = di * di; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok; int i, j, k, ij, ksh, ic, dk, dijk; double *pin, *pout; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0]; pin = in + dijk * ic; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { ij = j * di + i; for (k = 0; k < dk; k++) { pout[j*naok+k] = pin[k*dij+ij]; } } pout += (i+ao_loc[ish]+1) * naok; } } in += dijk * comp; } } /* ('...LM->...', int3c) */ void PBCnr3c_fill_gs2(int (*intor)(), double *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { int ip = ish + shls_slice[0]; int jp = jsh + shls_slice[2] - nbas; if (ip > jp) { _nr3c_fill_g(intor, &sort3c_gs2_igtj, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } else if (ip == jp) { _nr3c_fill_g(intor, &sort3c_gs2_ieqj, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } } int PBCsizeof_env(int *shls_slice, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; int ish, ia, np, nc; int nenv = 0; for (ish = ish0; ish < ish1; ish++) { ia = bas[ATOM_OF +ish*BAS_SLOTS]; nenv = MAX(atm[PTR_COORD+ia*ATM_SLOTS]+3, nenv); np = 
bas[NPRIM_OF+ish*BAS_SLOTS]; nc = bas[NCTR_OF +ish*BAS_SLOTS]; nenv = MAX(bas[PTR_EXP +ish*BAS_SLOTS]+np, nenv); nenv = MAX(bas[PTR_COEFF+ish*BAS_SLOTS]+np*nc, nenv); } return nenv; } void PBCnr3c_drv(int (*intor)(), void (*fill)(), double complex *eri, int nkpts_ij, int nkpts, int comp, int nimgs, double *Ls, double complex *expkL, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env, int nenv) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX); double *expkL_i = expkL_r + nimgs*nkpts; int i; for (i = 0; i < nimgs*nkpts; i++) { expkL_r[i] = creal(expkL[i]); expkL_i[i] = cimag(expkL[i]); } size_t count; if (fill == &PBCnr3c_fill_kks1 || fill == &PBCnr3c_fill_kks2) { int dijk =(GTOmax_shell_dim(ao_loc, shls_slice+0, 1) * GTOmax_shell_dim(ao_loc, shls_slice+2, 1) * GTOmax_shell_dim(ao_loc, shls_slice+4, 1)); count = nkpts*nkpts * OF_CMPLX + nkpts*MIN(nimgs,IMGBLK) * OF_CMPLX + nimgs; // MAX(INTBUFMAX, dijk) to ensure buffer is enough for at least one (i,j,k) shell count*= MAX(INTBUFMAX, dijk) * comp; } else { count = (nkpts * OF_CMPLX + nimgs) * INTBUFMAX10 * comp; count+= nimgs * nkpts * OF_CMPLX; } const int cache_size = GTOmax_cache_size(intor, shls_slice, 3, atm, natm, bas, nbas, env); #pragma omp parallel { int ish, jsh, ij; double *env_loc = malloc(sizeof(double)*nenv); NPdcopy(env_loc, env, nenv); double *buf = malloc(sizeof(double)*(count+cache_size)); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; (*fill)(intor, eri, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, atm, natm, bas, nbas, env); } free(buf); free(env_loc); } free(expkL_r); } static 
/* Unpack the blocked real/imag k-point buffers produced by dgemm into the
 * complex 2-center output, transposing each (dj x di) tile to (i, j)
 * row-major order.  Covers shells [msh0, msh1) for a fixed jsh. */
void sort2c_ks1(double complex *out, double *bufr, double *bufi,
                int *shls_slice, int *ao_loc, int nkpts, int comp,
                int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nij = naoi * naoj;

        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dimax = ao_loc[msh1] - ao_loc[msh0];
        const size_t dmjc = dimax * dj * comp;
        out += jp;

        int i, j, kk, ish, ic, di, dij;
        size_t off;
        double *pbr, *pbi;
        double complex *pout;

        for (kk = 0; kk < nkpts; kk++) {
                off = kk * dmjc;
                for (ish = msh0; ish < msh1; ish++) {
                        di = ao_loc[ish+1] - ao_loc[ish];
                        dij = di * dj;
                        for (ic = 0; ic < comp; ic++) {
                                pout = out + nij*ic + naoj*(ao_loc[ish]-ao_loc[ish0]);
                                pbr = bufr + off + dij*ic;
                                pbi = bufi + off + dij*ic;
                                /* buffer is j-major; output is i-major */
                                for (j = 0; j < dj; j++) {
                                for (i = 0; i < di; i++) {
                                        pout[i*naoj+j] = pbr[j*di+i] + pbi[j*di+i]*_Complex_I;
                                } }
                        }
                        off += dij * comp;
                }
                out += nij * comp;
        }
}

/* Compute 2-center integrals for one jsh over all lattice images, contract
 * the image axis with exp(i k.L) via two real dgemm calls (real and imag
 * parts separately), then scatter the per-k-point result into `out`. */
static void _nr2c_fill(int (*intor)(), double complex *out,
                       int nkpts, int comp, int nimgs, int jsh, int ish0,
                       double *buf, double *env_loc, double *Ls,
                       double *expkL_r, double *expkL_i,
                       int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt, PBCOpt *pbcopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const char TRANS_N = 'N';
        const double D1 = 1;
        const double D0 = 0;

        ish0 += shls_slice[0];
        jsh += jsh0;
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        /* partition the ish range so each batch fits in INTBUFMAX10 doubles */
        int dimax = INTBUFMAX10 / dj;
        int ishloc[ish1-ish0+1];
        int nishloc = shloc_partition(ishloc, ao_loc, ish0, ish1, dimax);

        /* NOTE(review): `empty` is written below but never read in this
         * function — looks like dead state carried over from a sibling. */
        int m, msh0, msh1, dmjc, ish, di, empty;
        int jL;
        int shls[2];
        double *bufk_r = buf;
        double *bufk_i, *bufL, *pbuf, *cache;
        shls[1] = jsh;

        for (m = 0; m < nishloc; m++) {
                msh0 = ishloc[m];
                msh1 = ishloc[m+1];
                dimax = ao_loc[msh1] - ao_loc[msh0];
                dmjc = dj * dimax * comp;
                /* carve bufk_r | bufk_i | bufL | cache out of one buffer */
                bufk_i = bufk_r + dmjc * nkpts;
                bufL = bufk_i + dmjc * nkpts;
                cache = bufL + dmjc * nimgs;

                pbuf = bufL;
                for (jL = 0; jL < nimgs; jL++) {
                        /* translate shell jsh by lattice vector Ls[jL] */
                        shift_bas(env_loc, env, Ls, jptrxyz, jL);
                        for (ish = msh0; ish < msh1; ish++) {
                                shls[0] = ish;
                                di = ao_loc[ish+1] - ao_loc[ish];
                                if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas,
                                             env_loc, cintopt, cache)) {
                                        empty = 0;
                                }
                                pbuf += di * dj * comp;
                        }
                }
                /* contract image axis: bufk = bufL . expkL (real, then imag) */
                dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
                       &D1, bufL, &dmjc, expkL_r, &nimgs, &D0, bufk_r, &dmjc);
                dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
                       &D1, bufL, &dmjc, expkL_i, &nimgs, &D0, bufk_i, &dmjc);

                sort2c_ks1(out, bufk_r, bufk_i, shls_slice, ao_loc,
                           nkpts, comp, jsh, msh0, msh1);
        }
}

/* ('...M,kL->...k', int3c, exp_kL, exp_kL) */
/* s1 (no symmetry) wrapper: ish range starts at shls_slice[0]. */
void PBCnr2c_fill_ks1(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, PBCOpt *pbcopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        _nr2c_fill(intor, out, nkpts, comp, nimgs, jsh, 0,
                   buf, env_loc, Ls, expkL_r, expkL_i, shls_slice, ao_loc,
                   cintopt, pbcopt, atm, natm, bas, nbas, env);
}

/* s2 wrapper: ish range starts at jsh (triangular coverage). */
void PBCnr2c_fill_ks2(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, PBCOpt *pbcopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        _nr2c_fill(intor, out, nkpts, comp, nimgs, jsh, jsh,
                   buf, env_loc, Ls, expkL_r, expkL_i, shls_slice, ao_loc,
                   cintopt, pbcopt, atm, natm, bas, nbas, env);
}

/* Driver for 2-center k-point integrals, OpenMP-parallelized over jsh.
 * Mirrors PBCnr3c_drv: expkL is split into real/imag arrays up front. */
void PBCnr2c_drv(int (*intor)(), void (*fill)(), double complex *out,
                 int nkpts, int comp, int nimgs,
                 double *Ls, double complex *expkL,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt, PBCOpt *pbcopt,
                 int *atm, int natm, int *bas, int nbas, double *env, int nenv)
{
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int njsh = jsh1 - jsh0;
        double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
        double *expkL_i = expkL_r + nimgs*nkpts;
        int i;
        for (i = 0; i < nimgs*nkpts; i++) {
                expkL_r[i] = creal(expkL[i]);
                expkL_i[i] = cimag(expkL[i]);
        }
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 2,
                                                 atm, natm, bas, nbas, env);

#pragma omp parallel
{
        int jsh;
        double *env_loc = malloc(sizeof(double)*nenv);
        NPdcopy(env_loc, env, nenv);
        size_t count = nkpts * OF_CMPLX + nimgs;
        double *buf = malloc(sizeof(double)*(count*INTBUFMAX10*comp+cache_size));
#pragma omp for schedule(dynamic)
        for (jsh = 0; jsh < njsh; jsh++) {
                (*fill)(intor, out, nkpts, comp, nimgs, jsh,
                        buf, env_loc, Ls, expkL_r, expkL_i, shls_slice, ao_loc,
                        cintopt, pbcopt, atm, natm, bas, nbas, env);
        }
        free(buf);
        free(env_loc);
}
        free(expkL_r);
}
/* ==================== mttkrp_omp.c ==================== */
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <ParTI.h> #include "hicoo.h" #include <omp.h> #define CHUNKSIZE 1 int sptOmpMTTKRPHiCOOKernels( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOBlocks( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb); int sptOmpMTTKRPHiCOOBlocks_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb); int sptOmpMTTKRPHiCOOKernelsBlocks( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. 
sptIndex const mode, const int tk, const int tb); int sptOmpMTTKRPHiCOOKernelsBlocks_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, const int tb); int sptOmpMTTKRPHiCOOKernels_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. 
sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. 
sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk); int sptOmpMTTKRPHiCOOBlocks_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb); int sptOmpMTTKRPHiCOOBlocks_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb); int sptOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, const int tb); int sptOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, const int tb); /** * Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode * @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R * @param[in] hitsr the HiCOO sparse tensor input * @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary * @param[in] mats_order the order of the Khatri-Rao products * @param[in] mode the mode on which the MTTKRP is performed * @param[in] scratch an temporary array to store intermediate results, space assigned before this function * * This function uses support arbitrary-order sparse tensors with Khatri-Rao * products of dense factor matrices, the output is the updated dense matrix for the "mode". 
*/
/* Top-level OpenMP MTTKRP dispatcher for the coarse (non-tiled) matrices:
 * picks the kernel-level (tk), block-level (tb) or combined parallel
 * variant.  Returns 0 on success, -1 when tk == tb == 1 (caller should use
 * the sequential code path instead). */
int sptOmpMTTKRPHiCOO(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    if(tk > 1 && tb == 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernels(hitsr, mats, mats_order, mode, tk) == 0);
    } else if(tk == 1 && tb > 1) {
        sptAssert(sptOmpMTTKRPHiCOOBlocks(hitsr, mats, mats_order, mode, tb) == 0);
    } else if(tk > 1 && tb > 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernelsBlocks(hitsr, mats, mats_order, mode, tk, tb) == 0);
    } else if(tk == 1 && tb == 1) {
        /* NOTE(review): "sequetial" is a typo in this user-facing message;
         * kept byte-identical here because this is a documentation-only edit. */
        printf("Should specify sequetial MTTKRP.\n");
        return -1;
    }
    return 0;
}

/* Same dispatch as sptOmpMTTKRPHiCOO but for rank-tiled (sptRankMatrix)
 * factor matrices. */
int sptOmpMTTKRPHiCOO_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    if(tk > 1 && tb == 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling(hitsr, mats, mats_order, mode, tk) == 0);
    } else if(tk == 1 && tb > 1) {
        sptAssert(sptOmpMTTKRPHiCOOBlocks_MatrixTiling(hitsr, mats, mats_order, mode, tb) == 0);
    } else if(tk > 1 && tb > 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling(hitsr, mats, mats_order, mode, tk, tb) == 0);
    } else if(tk == 1 && tb == 1) {
        printf("Should specify sequetial MTTKRP with -d -2.\n");
        return -1;
    }
    return 0;
}

/* Privatization-free scheduled variant; `balanced` selects the load-balanced
 * kernel schedule.  Only kernel-level parallelism (tk > 1, tb == 1) is
 * implemented. */
int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb,
    int balanced)
{
    if(tk > 1 && tb == 1) {
        if (balanced == 0)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0);
        else if (balanced == 1)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0);
    } else {
        printf("Haven't support block parallelism.\n");
        return -1;
    }
    return 0;
}

/* Scheduled variant with per-thread copy_mats reduction buffers. */
int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb,
    int balanced)
{
    if(tk > 1 && tb == 1) {
        if(balanced == 0)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
        else if (balanced == 1)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
    } else {
        printf("Haven't support block parallelism.\n");
        return -1;
    }
    return 0;
}

/* Two-phase scheduled-reduce variant (no balanced option). */
int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce_Two(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    if(tk > 1 && tb == 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
    } else {
        printf("Haven't support block parallelism.\n");
        return -1;
    }
    return 0;
}

/* Kernel-parallel MTTKRP over HiCOO kernels; atomic updates protect rows of
 * the output matrix shared between kernels.  (Body continues below.) */
int sptOmpMTTKRPHiCOOKernels(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    /* Specialized 3-D path avoids per-entry coordinate arrays. */
    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const M = mats[nmodes];
    sptValue * const mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    // omp_lock_t lock;
    // omp_init_lock(&lock);

    /* Loop kernels */
#pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        /* Allocate thread-private data */
        sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord));
        sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);

        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Block indices */
            for(sptIndex m=0; m<nmodes; ++m)
                block_coord[m] = hitsr->binds[m].data[b];

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Element indices: global index = (block << sb_bits) + offset */
                for(sptIndex m=0; m<nmodes; ++m)
                    ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z];

                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptMatrix * times_mat = mats[times_mat_index];
                sptIndex tmp_i = ele_coord[times_mat_index];
                sptValue const entry = vals[z];
                for(sptIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    times_mat = mats[times_mat_index];
                    tmp_i = ele_coord[times_mat_index];
                    for(sptIndex r=0; r<R; ++r) {
                        scratch.data[r] *= times_mat->values[tmp_i * stride + r];
                    }
                }

                sptIndex const mode_i = ele_coord[mode];
                // omp_set_lock(&lock);
                /* output rows may be shared between kernels -> atomic adds */
                for(sptIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += scratch.data[r];
                }
                // omp_unset_lock(&lock);
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(block_coord);
        free(ele_coord);
        sptFreeValueVector(&scratch);
    }   // End loop kernels
    // omp_destroy_lock(&lock);

    return 0;
}

/* 3-D specialization of sptOmpMTTKRPHiCOOKernels: the two non-target factor
 * matrices are resolved once up front, no per-entry coordinate arrays. */
int sptOmpMTTKRPHiCOOKernels_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
#pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        /* NOTE(review): b and z are sptIndex here although kptr/bptr hold
         * sptNnzIndex values; the generic kernel above uses sptNnzIndex —
         * confirm there is no truncation for very large tensors. */
        for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
            sptBlockIndex block_coord_mode = hitsr->binds[mode].data[b];
            sptBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b];
            sptBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b];

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                sptIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z];
                sptIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z];
                sptIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];

                for(sptIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += entry *
                        times_mat_1->values[tmp_i_1 * stride + r] *
                        times_mat_2->values[tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}

int
sptOmpMTTKRPHiCOOKernels_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* Loop kernels */
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        /* Allocate thread-private data */
        sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);

        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Blocked matrices: base pointers of each factor for this block */
            for(sptIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                #pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    #pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        sptFreeValueVector(&scratch);
    }   // End loop kernels

    return 0;
}

/* 3-D specialization of the matrix-tiled kernel-parallel MTTKRP. */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                sptElementIndex mode_i = hitsr->einds[mode].data[z];
                sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;

                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += entry *
                        blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}

/* Scheduled kernel-parallel MTTKRP: kernels are grouped by mode slice
 * (hitsr->kschr) so concurrent threads write disjoint output rows and no
 * atomics are needed in the accumulation loop.  (Body continues below.) */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
    for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex k=0;
k<num_kernel_dim; ++k) { int tid = omp_get_thread_num(); if(i >= kschr_mode[k].len) continue; sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels } // End loop 
iterations #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/
    /* All factor matrices must share the output's column count and match the
     * tensor's mode sizes. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* Kernel side length sk = 2^sk_bits; number of kernel strips along the
     * output mode. NOTE(review): pow() on doubles for an integer power of
     * two — a shift (1u << sk_bits) would avoid any float rounding concern. */
    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop partitions */
    for(sptIndex p=0; p<npars; ++p) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex i=0; i<num_kernel_dim; ++i) {
            /* NOTE(review): if len == 0 and it is unsigned, len - 1 wraps to a
             * huge value and the guard passes — verify len >= 1 is guaranteed. */
            if(p >= kschr_balanced_pos_mode[i].len - 1) continue;
            int tid = omp_get_thread_num();
            sptIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            /* Loop inside a partition */
            for(sptIndex j = j_begin; j < j_end; ++j) {
                sptIndex kernel_num = kschr_balanced_mode[i].data[j];
                sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Allocate thread-private data */
                sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
                sptValueVector scratch; // Temporary array
                sptNewValueVector(&scratch, R, R);

                /* Loop blocks in a kernel */
                for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                    /* Blocked matrices */
                    for(sptIndex m=0; m<nmodes; ++m)
                        blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                    sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                    sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

                    /* Loop entries in a block */
                    /* NOTE(review): z is sptIndex, bounds are sptNnzIndex —
                     * possible narrowing; same pattern throughout this file. */
                    for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                        /* Multiply the 1st matrix */
                        sptIndex times_mat_index = mats_order[1];
                        sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                        sptValue const entry = vals[z];
#pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                        }
                        /* Multiply the rest matrices */
                        for(sptIndex m=2; m<nmodes; ++m) {
                            times_mat_index = mats_order[m];
                            tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                            for(sptElementIndex r=0; r<R; ++r) {
                                scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                            }
                        }

                        sptElementIndex const mode_i = hitsr->einds[mode].data[z];
#pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                        }
                    }   // End loop entries
                }   // End loop blocks

                /* Free thread-private space */
                free(blocked_times_mat);
                sptFreeValueVector(&scratch);
            }   // End loop inside a partition
        }   // End loop kernels
    }   // End loop partitions

    /* Process using atomics: kernels left out of the balanced schedule
     * (kschr_rest) may write output rows concurrently, so each rank-row
     * update is an atomic add. */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) {
        int tid = omp_get_thread_num();
        sptIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Allocate thread-private data */
        sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);

        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Blocked matrices */
            for(sptIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
#pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        sptFreeValueVector(&scratch);
    }   // End loop kernels

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
// printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* The two input factor matrices of the 3-D product, in mats_order order. */
    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];
    // printf("nkiters: %u, num_kernel_dim: %u\n", hitsr->nkiters[mode], num_kernel_dim);

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
    for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex k=0; k<num_kernel_dim; ++k) {
            int tid = omp_get_thread_num();
            // printf("tid: %d, (i, k): (%u, %u)\n", tid, i, k);
            /* Strip k has no kernel scheduled for iteration i. */
            if(i >= kschr_mode[k].len) {
                // printf("i: %u, k: %u\n", i, k);
                continue;
            }
            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Loop blocks in a kernel */
            /* NOTE(review): b and z below are sptIndex while their bounds are
             * sptNnzIndex — potential narrowing on very large tensors. */
            for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
                sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

                sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

                /* Loop entries in a block */
                for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                    sptElementIndex mode_i = hitsr->einds[mode].data[z];
                    sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                    sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                    sptValue entry = vals[z];
#pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry *
                            blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                            blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                    }
                }   // End loop entries
            }   // End loop blocks
        }   // End loop kernels
    }   // End loop iterations

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}


/*
 * OpenMP MTTKRP on a 3-D HiCOO tensor with rank-matrix tiling and the
 * *balanced* kernel schedule; leftover kernels (kschr_rest) are processed
 * afterwards with atomic updates. Returns 0 on success.
 */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];
    // printf("nkiters: %u, num_kernel_dim: %u\n", hitsr->nkiters[mode], num_kernel_dim);

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop partitions */
    for(sptIndex p=0; p<npars; ++p) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex i=0; i<num_kernel_dim; ++i) {
            if(p >=
kschr_balanced_pos_mode[i].len - 1) continue;
            int tid = omp_get_thread_num();
            sptIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            /* Loop inside a partition */
            for(sptIndex j = j_begin; j < j_end; ++j) {
                sptIndex kernel_num = kschr_balanced_mode[i].data[j];
                // printf("tid: %d, (i, j): (%u, %u), kernel_num: %u\n", tid, i, j, kernel_num);
                sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Loop blocks in a kernel */
                /* NOTE(review): b/z are sptIndex but their bounds are
                 * sptNnzIndex — possible narrowing on very large tensors. */
                for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
                    sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

                    sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                    sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

                    /* Loop entries in a block */
                    for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                        sptElementIndex mode_i = hitsr->einds[mode].data[z];
                        sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                        sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                        sptValue entry = vals[z];
#pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry *
                                blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                                blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                        }
                    }   // End loop entries
                }   // End loop blocks
            }   // End loop inside a partition
        }   // End loop kernels
    }   // End loop partitions

    /* Process using atomics: leftover kernels (kschr_rest) may race on output
     * rows, so each rank entry is updated atomically (no simd here). */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) {
        int tid = omp_get_thread_num();
        sptIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Loop blocks in a kernel */
        for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                sptElementIndex mode_i = hitsr->einds[mode].data[z];
                sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    bmvals_row[r] += entry *
                        blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}


/*
 * OpenMP MTTKRP on a HiCOO tensor (general nmodes) with rank-matrix tiling,
 * per-iteration scheduling, and per-thread output copies (copy_mats) that
 * are summed into mats[nmodes] in a final reduction pass — avoiding atomics
 * in the main loop. Dispatches to the 3-D variant when nmodes == 3.
 * Returns 0 on success.
 */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* Zero each thread's private output copy before accumulation. */
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        int tid = omp_get_thread_num();
        /* Loop kernels */
        for(sptIndex k=0; k<num_kernel_dim; ++k) {
            if(i >= kschr_mode[k].len) continue;
            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Allocate thread-private data */
            sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            sptValueVector scratch; // Temporary array
            sptNewValueVector(&scratch, R, R);

            /* Loop blocks in a kernel */
            for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                /* Blocked matrices */
                for(sptIndex m=0; m<nmodes; ++m)
                    blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                /* Accumulate into this thread's private copy — no atomics. */
                sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

                /* Loop entries in a block */
                /* NOTE(review): z is sptIndex vs sptNnzIndex bounds — possible
                 * narrowing, same pattern as the other kernels in this file. */
                for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                    /* Multiply the 1st matrix */
                    sptIndex times_mat_index = mats_order[1];
                    sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                    sptValue const entry = vals[z];
#pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                    /* Multiply the rest matrices */
                    for(sptIndex m=2; m<nmodes; ++m) {
                        times_mat_index = mats_order[m];
                        tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                        }
                    }

                    sptElementIndex const mode_i = hitsr->einds[mode].data[z];
#pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                    }
                }   // End loop entries
            }   // End loop blocks

            /* Free thread-private space */
            free(blocked_times_mat);
            sptFreeValueVector(&scratch);
        }   // End loop kernels
    }   // End loop iterations

    /* Reduction: sum the tk private copies into the shared output, row by
     * row; rows are independent so the outer loop is a static parallel for. */
#pragma omp parallel for schedule(static) num_threads(tk)
    for(sptIndex i=0; i<ndims[mode]; ++i) {
        for(int t=0; t<tk; ++t) {
#pragma omp simd
            for(sptElementIndex r=0; r<R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}


/*
 * OpenMP MTTKRP on a HiCOO tensor (general nmodes) combining the balanced
 * kernel schedule with per-thread output copies (copy_mats) and a final
 * reduction; leftover kernels are handled with atomic updates into the
 * thread-private copies. Dispatches to the 3-D variant when nmodes == 3.
 * Returns 0 on success.
 */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* Zero each thread's private output copy before accumulation. */
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations: here the PARTITION loop is the parallel one
     * (unlike the non-reduce balanced kernel, which parallelizes kernels). */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex p=0; p<npars; ++p) {
        int tid = omp_get_thread_num();
        /* Loop kernels */
        for(sptIndex i=0; i<num_kernel_dim; ++i) {
            /* NOTE(review): unsigned len - 1 wraps if len == 0 — verify
             * kschr_balanced_pos entries always have len >= 1. */
            if(p >= kschr_balanced_pos_mode[i].len - 1) continue;
            sptIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            for(sptIndex j=j_begin; j<j_end; ++j) {
                sptIndex kernel_num = kschr_balanced_mode[i].data[j];
                sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Allocate thread-private data */
                sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
                sptValueVector scratch; // Temporary array
                sptNewValueVector(&scratch, R, R);

                /* Loop blocks in a kernel */
                for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                    /* Blocked matrices */
                    for(sptIndex m=0; m<nmodes; ++m)
                        blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                    /* Accumulate into this thread's private copy. */
                    sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                    sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                    sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

                    /* Loop entries in a block */
                    for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                        /* Multiply the 1st matrix */
                        sptIndex times_mat_index = mats_order[1];
                        sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                        sptValue const entry = vals[z];
#pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                        }
                        /* Multiply the rest matrices */
                        for(sptIndex m=2; m<nmodes; ++m) {
                            times_mat_index = mats_order[m];
                            tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                            for(sptElementIndex r=0; r<R; ++r) {
                                scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                            }
                        }

                        sptElementIndex const mode_i = hitsr->einds[mode].data[z];
#pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                        }
                    }   // End loop entries
                }   // End loop blocks

                /* Free thread-private space */
                free(blocked_times_mat);
                sptFreeValueVector(&scratch);
            }   // End kernels in a partition
        }   // End loop kernels
    }   // End loop iterations

    /* Process using atomics */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) {
        int tid = omp_get_thread_num();
        sptIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Allocate thread-private data */
        sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);

        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Blocked matrices */
            for(sptIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            /* Use copy_mats to reduce atomics */
            sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
#pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
#pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r=0; r<R; ++r) {
#pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        sptFreeValueVector(&scratch);
    }   // End loop kernels

    /* Reduction: fold the tk private copies into the shared output. */
#pragma omp parallel for schedule(static) num_threads(tk)
    for(sptIndex i=0; i<ndims[mode]; ++i) {
        for(int t=0; t<tk; ++t) {
#pragma omp simd
            for(sptElementIndex r=0; r<R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}


/*
 * OpenMP MTTKRP on a 3-D HiCOO tensor with rank-matrix tiling, per-iteration
 * scheduling, and per-thread output copies (copy_mats) reduced into
 * mats[nmodes] at the end. Returns 0 on success.
 */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats.
*/
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* Zero each thread's private output copy before accumulation. */
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        int tid = omp_get_thread_num();
        /* Loop kernels */
        for(sptIndex k=0; k<num_kernel_dim; ++k) {
            if(i >= kschr_mode[k].len) {
                // printf("i: %u, k: %u\n", i, k);
                continue;
            }
            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Loop blocks in a kernel */
            /* NOTE(review): b/z are sptIndex vs sptNnzIndex bounds — possible
             * narrowing on very large tensors. */
            for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
                /* use copy_mats to store each thread's output */
                sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

                sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

                /* Loop entries in a block */
                for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                    sptElementIndex mode_i = hitsr->einds[mode].data[z];
                    sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                    sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                    sptValue entry = vals[z];
#pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry *
                            blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                            blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                    }
                }   // End loop entries
            }   // End loop blocks
        }   // End loop kernels
    }   // End loop iterations

    /* Reduction: fold the tk private copies into the shared output. */
#pragma omp parallel for schedule(static) num_threads(tk)
    for(sptIndex i=0; i<ndims[mode]; ++i) {
        for(int t=0; t<tk; ++t) {
#pragma omp simd
            for(sptElementIndex r=0; r<R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}


/*
 * OpenMP MTTKRP on a 3-D HiCOO tensor: balanced kernel schedule combined
 * with per-thread output copies and a final reduction. (Definition continues
 * beyond this view.)
 */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs =
(sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex p=0; p<npars; ++p) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; sptIndex j_begin = kschr_balanced_pos_mode[i].data[p]; sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1]; for(sptIndex j=j_begin; j<j_end; ++j) { sptIndex kernel_num = kschr_balanced_mode[i].data[j]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * 
blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End kernels in a partition } // End loop kernels } // End loop partitions /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Use copy_mats to reduce atomics */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels /* Reduction */ #pragma omp parallel for 
schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } /* Calculate load balance of kernels */ #ifdef NNZ_STATISTICS sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; int tk2 = 2; /* Loop parallel iterations */ #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk/tk2) for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ // TODO: cannot compile using icc // #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk2) for(sptIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) continue; sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = 
hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk/tk2; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; int tk2 = 2; /* Loop parallel iterations */ #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk/tk2) for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ // Cannot compile using icc // #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk2) for(sptIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) { // printf("i: %u, k: %u\n", i, k); continue; } sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) 
* stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk/tk2; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } return 0; } int sptOmpMTTKRPHiCOOBlocks( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOBlocks_3D(hitsr, mats, mats_order, mode, tb) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const M = mats[nmodes]; sptValue * const mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); // omp_lock_t lock; // omp_init_lock(&lock); /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data */ sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord)); sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Block indices */ for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = hitsr->binds[m].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Element indices */ for(sptIndex m=0; m<nmodes; ++m) ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z]; /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex tmp_i = ele_coord[times_mat_index]; sptValue const entry = vals[z]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; times_mat = mats[times_mat_index]; tmp_i = ele_coord[times_mat_index]; for(sptIndex r=0; r<R; ++r) { 
scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } sptIndex const mode_i = ele_coord[mode]; // omp_set_lock(&lock); for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals[mode_i * stride + r] += scratch.data[r]; } // omp_unset_lock(&lock); } // End loop entries /* Free thread-private space */ free(block_coord); free(ele_coord); sptFreeValueVector(&scratch); } // End loop blocks } // End loop kernels // omp_destroy_lock(&lock); return 0; } int sptOmpMTTKRPHiCOOBlocks_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptBlockIndex block_coord_mode = 
hitsr->binds[mode].data[b]; sptBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b]; sptBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z]; sptIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z]; sptIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; } int sptOmpMTTKRPHiCOOBlocks_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOBlocks_3D_MatrixTiling(hitsr, mats, mats_order, mode, tb) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= 
blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop blocks } // End loop kernels return 0; } int sptOmpMTTKRPHiCOOBlocks_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data 
*/ sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; } int sptOmpMTTKRPHiCOOKernelsBlocks( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, const int tb) { omp_set_nested(1); omp_set_dynamic(0); sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernelsBlocks_3D(hitsr, mats, mats_order, mode, tk, tb) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const M = mats[nmodes]; sptValue * const mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); // omp_lock_t lock; // omp_init_lock(&lock); /* Loop kernels */ #pragma omp parallel for num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data */ sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord)); sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Block indices */ for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = hitsr->binds[m].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Element indices */ for(sptIndex m=0; m<nmodes; ++m) ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z]; /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex tmp_i = ele_coord[times_mat_index]; sptValue const entry = vals[z]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; times_mat = mats[times_mat_index]; tmp_i = ele_coord[times_mat_index]; 
for(sptIndex r=0; r<R; ++r) { scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } sptIndex const mode_i = ele_coord[mode]; // omp_set_lock(&lock); for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals[mode_i * stride + r] += scratch.data[r]; } // omp_unset_lock(&lock); } // End loop entries /* Free thread-private space */ free(block_coord); free(ele_coord); sptFreeValueVector(&scratch); } // End loop blocks } // End loop kernels // omp_destroy_lock(&lock); return 0; } int sptOmpMTTKRPHiCOOKernelsBlocks_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, const int tb) { omp_set_nested(1); omp_set_dynamic(0); sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* Loop kernels */ #pragma omp parallel for num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp 
parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptBlockIndex block_coord_mode = hitsr->binds[mode].data[b]; sptBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b]; sptBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z]; sptIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z]; sptIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; } int sptOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, const int tb) { sptIndex const nmodes = hitsr->nmodes; omp_set_nested(1); omp_set_dynamic(0); if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling(hitsr, mats, mats_order, mode, tk, tb) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); /* Loop kernels */ #pragma omp parallel for num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= 
blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop blocks } // End loop kernels return 0; } int sptOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, const int tb) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* Loop kernels */ #pragma omp parallel for num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex 
b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data */ sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; }
filter.h
#ifndef OPENMC_TALLIES_FILTER_H
#define OPENMC_TALLIES_FILTER_H

#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include <gsl/gsl>

#include "openmc/constants.h"
#include "openmc/hdf5_interface.h"
#include "openmc/particle.h"
#include "pugixml.hpp"

namespace openmc {

//==============================================================================
//! Stores bins and weights for filtered tally events.
//==============================================================================

class FilterMatch {
public:
  std::vector<int> bins_;       //!< Indices of the filter bins that matched
  std::vector<double> weights_; //!< Weight associated with each matching bin
  int i_bin_; // NOTE(review): appears to be a cursor into bins_/weights_ -- confirm with callers
  bool bins_present_ {false};   //!< Whether bins_/weights_ have been filled for this event
};

} // namespace openmc

// Without an explicit instantiation of vector<FilterMatch>, the Intel compiler
// will complain about the threadprivate directive on filter_matches. Note that
// this has to happen *outside* of the openmc namespace
extern template class std::vector<openmc::FilterMatch>;

namespace openmc {

//==============================================================================
//! Modifies tally score events.
//==============================================================================

class Filter {
public:
  //----------------------------------------------------------------------------
  // Constructors, destructors, factory functions

  Filter();
  virtual ~Filter();

  //! Create a new tally filter
  //
  //! \param[in] type Type of the filter
  //! \param[in] id Unique ID for the filter. If none is passed, an ID is
  //!   automatically assigned
  //! \return Pointer to the new filter object
  static Filter* create(const std::string& type, int32_t id = -1);

  //! Create a new tally filter from an XML node
  //
  //! \param[in] node XML node
  //! \return Pointer to the new filter object
  static Filter* create(pugi::xml_node node);

  //! Uses an XML input to fill the filter's data fields.
  virtual void from_xml(pugi::xml_node node) = 0;

  //----------------------------------------------------------------------------
  // Methods

  //! Short type name of the filter (used e.g. when writing statepoints).
  virtual std::string type() const = 0;

  //! Matches a tally event to a set of filter bins and weights.
  //!
  //! \param[out] match will contain the matching bins and corresponding
  //!   weights; note that there may be zero matching bins
  virtual void get_all_bins(const Particle* p, int estimator, FilterMatch& match)
    const = 0;

  //! Writes data describing this filter to an HDF5 statepoint group.
  virtual void to_statepoint(hid_t filter_group) const
  {
    // Base implementation records only the type name and bin count;
    // derived filters extend this with their own datasets.
    write_dataset(filter_group, "type", type());
    write_dataset(filter_group, "n_bins", n_bins_);
  }

  //! Return a string describing a filter bin for the tallies.out file.
  //
  //! For example, an `EnergyFilter` might return the string
  //! "Incoming Energy [0.625E-6, 20.0)".
  virtual std::string text_label(int bin) const = 0;

  //----------------------------------------------------------------------------
  // Accessors

  //! Get unique ID of filter
  //! \return Unique ID
  int32_t id() const { return id_; }

  //! Assign a unique ID to the filter
  //! \param[in] Unique ID to assign. A value of -1 indicates that an ID should
  //!   be automatically assigned
  void set_id(int32_t id);

  //! Get number of bins
  //! \return Number of bins
  int n_bins() const { return n_bins_; }

  gsl::index index() const { return index_; }

  //----------------------------------------------------------------------------
  // Data members

protected:
  int n_bins_; //!< Number of filter bins; set by derived classes

private:
  int32_t id_ {C_NONE};  //!< Unique user-facing ID (C_NONE until assigned)
  gsl::index index_;     //!< Position of this filter in model::tally_filters
};

//==============================================================================
// Global variables
//==============================================================================

namespace simulation {
  // Per-thread scratch storage for filter matching during tracking
  extern std::vector<FilterMatch> filter_matches;
#pragma omp threadprivate(filter_matches)
} // namespace simulation

namespace model {
  extern "C" int32_t n_filters;
  extern std::vector<std::unique_ptr<Filter>> tally_filters;
  extern std::unordered_map<int, int> filter_map; // filter ID -> index in tally_filters
}

//==============================================================================
// Non-member functions
//==============================================================================

//! Make sure index corresponds to a valid filter
int verify_filter(int32_t index);

} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_H
unpk.c
/* unpack_grib
 * 3/2008 public domain Wesley Ebisuzaki
 * 5/2016 public domain DWD
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <stddef.h>
#include "wgrib2.h"
#include "grb2.h"

#ifdef USE_PNG
#include <png.h>
int dec_png_clone(unsigned char *,int *,int *,char *);
int i;
#endif
#ifdef USE_JASPER
#include <jasper/jasper.h>
#endif
#ifdef USE_AEC
#include <libaec.h>
#endif

/*
 * unpack grib -- only some formats (code table 5.0) are supported
 *
 * supported: 0 (simple), 4 (ieee), 40 (jpeg), 41(png), 42(aec)
 *
 * input: sec[]
 *        float data[npnts]
 *
 */

int unpk_grib(unsigned char **sec, float *data) {

    int packing, bitmap_flag, nbits;
    unsigned int ndata, ii;
    unsigned char *mask_pointer, mask;
    unsigned char *ieee, *p;
    float tmp;
//    float reference, tmp;
    double reference;
    double bin_scale, dec_scale, b;
#ifdef USE_PNG
    int width, height;
#endif
#ifdef USE_JASPER
    jas_image_t *image;
    char *opts;
    jas_stream_t *jpcstream;
    jas_image_cmpt_t *pcmpt;
    jas_matrix_t *jas_data;
    int j, k;
#endif
#if (defined USE_PNG || defined USE_AEC)
    unsigned char *c;
#endif
#ifdef USE_AEC
    struct aec_stream strm;
    int status;
    int numBitsNeeded;
    size_t size;
#endif

    packing = code_table_5_0(sec);
//    ndata = (int) GB2_Sec3_npts(sec);
    ndata = GB2_Sec3_npts(sec);
    bitmap_flag = code_table_6_0(sec);
    if (bitmap_flag != 0 && bitmap_flag != 254 && bitmap_flag != 255)
        fatal_error("unknown bitmap", "");

    if (packing == 4) {			// ieee
	/* Only 32-bit (precision code 1) IEEE floats are handled. */
	if (sec[5][11] != 1) fatal_error_i("unpk ieee grib file precision %d not supported", (int) sec[5][11]);
        // ieee depacking -- simple no bitmap
        if (bitmap_flag == 255) {
            for (ii = 0; ii < ndata; ii++) {
                data[ii] = ieee2flt_nan(sec[7]+5+ii*4);
            }
            return 0;
        }
        if (bitmap_flag == 0 || bitmap_flag == 254) {
	    /* Walk the bitmap one bit per grid point; set bits consume the
	     * next 4-byte IEEE value, clear bits yield UNDEFINED. */
            mask_pointer = sec[6] + 6;
            ieee = sec[7]+5;
            mask = 0;
            for (ii = 0; ii < ndata; ii++) {
                if ((ii & 7) == 0) mask = *mask_pointer++;
                if (mask & 128) {
                    data[ii] = ieee2flt_nan(ieee);
                    ieee += 4;
                }
                else {
                    data[ii] = UNDEFINED;
                }
                mask <<= 1;
            }
            return 0;
        }
        fatal_error("unknown bitmap", "");
    }
    else if (packing == 0 || packing == 61) {		// simple grib1 packing  61 -- log preprocessing
	/* Decode scaling parameters from Section 5. */
        p = sec[5];
        reference = ieee2flt(p+11);
        bin_scale = Int_Power(2.0, int2(p+15));
        dec_scale = Int_Power(10.0, -int2(p+17));
        nbits = p[19];
	b = 0.0;
	if (packing == 61) b = ieee2flt(p+20);

        if (bitmap_flag != 0 && bitmap_flag != 254 && bitmap_flag != 255)
            fatal_error("unknown bitmap", "");

	/* nbits == 0 means a constant field: every defined point equals the
	 * (scaled) reference value. */
        if (nbits == 0) {
            tmp = reference*dec_scale;
	    if (packing == 61) tmp = exp(tmp) - b;	// remove log prescaling
            if (bitmap_flag == 255) {
                for (ii = 0; ii < ndata; ii++) {
                    data[ii] = tmp;
                }
                return 0;
            }
            if (bitmap_flag == 0 || bitmap_flag == 254) {
                mask_pointer = sec[6] + 6;
                mask = 0;
                for (ii = 0; ii < ndata; ii++) {
                    if ((ii & 7) == 0) mask = *mask_pointer++;
                    data[ii] = (mask & 128) ? tmp : UNDEFINED;
                    mask <<= 1;
                }
                return 0;
            }
        }

        mask_pointer = (bitmap_flag == 255) ? NULL : sec[6] + 6;
        unpk_0(data, sec[7]+5, mask_pointer, nbits, ndata, reference,
            bin_scale,dec_scale);

	if (packing == 61) {		// remove log prescaling
// #pragma omp parallel for private(ii) schedule(static)
            for (ii = 0; ii < ndata; ii++) {
		if (DEFINED_VAL(data[ii])) data[ii] = exp(data[ii]) - b;
	    }
	}
        return 0;
    }
    else if (packing == 2 || packing == 3) {		// complex
	return unpk_complex(sec, data, ndata);
    }
    else if (packing == 200) {				// run length
	return unpk_run_length(sec, data, ndata);
    }
#ifdef USE_JASPER
    else if (packing == 40 || packing == 40000) {	// jpeg2000
        p = sec[5];
        reference = ieee2flt(p+11);
        bin_scale = Int_Power(2.0, int2(p+15));
        dec_scale = Int_Power(10.0, -int2(p+17));
        nbits = p[19];

	/* Constant field shortcut (see simple-packing branch). */
        if (nbits == 0) {
            tmp = reference*dec_scale;
            if (bitmap_flag == 255) {
                for (ii = 0; ii < ndata; ii++) {
                    data[ii] = tmp;
                }
                return 0;
            }
            if (bitmap_flag == 0 || bitmap_flag == 254) {
                mask_pointer = sec[6] + 6;
                ieee = sec[7]+5;
                mask = 0;
                for (ii = 0; ii < ndata; ii++) {
                    if ((ii & 7) == 0) mask = *mask_pointer++;
                    data[ii] = (mask & 128) ? tmp : UNDEFINED;
                    mask <<= 1;
                }
                return 0;
            }
            fatal_error("unknown bitmap", "");
        }

        // decode jpeg2000
        image = NULL;
        opts = NULL;
        jpcstream=jas_stream_memopen((char *) sec[7]+5, (int) GB2_Sec7_size(sec)-5);
        image = jpc_decode(jpcstream, opts);
        if (image == NULL) fatal_error("jpeg2000 decoding", "");
        pcmpt = image->cmpts_[0];
        if (image->numcmpts_ != 1 )
            fatal_error("unpk: Found color image. Grayscale expected","");

        jas_data=jas_matrix_create(jas_image_height(image), jas_image_width(image));
        jas_image_readcmpt(image,0,0,0,jas_image_width(image), jas_image_height(image),jas_data);

        // transfer data
	/* Decoded values land at the tail of data[]; k is the offset so that
	 * the bitmap expansion below can scatter them in place. */
        k = ndata - pcmpt->height_ * pcmpt->width_;
// #pragma omp parallel for private(ii,j)
        for (ii=0;ii<pcmpt->height_;ii++) {
            for (j=0;j<pcmpt->width_;j++) {
                // data[k++] = (((jas_data->rows_[ii][j])*bin_scale)+reference)*dec_scale;
                data[k+j+ii*pcmpt->width_] = (((jas_data->rows_[ii][j])*bin_scale)+reference)*dec_scale;
            }
        }

        if (bitmap_flag == 0 || bitmap_flag == 254) {
            k = ndata - pcmpt->height_ * pcmpt->width_;
            mask_pointer = sec[6] + 6;
            mask = 0;
            for (ii = 0; ii < ndata; ii++) {
                if ((ii & 7) == 0) mask = *mask_pointer++;
                data[ii] = (mask & 128) ? data[k++] : UNDEFINED;
                mask <<= 1;
            }
        }
        else if (bitmap_flag != 255) {
            fatal_error_i("unknown bitmap: %d", bitmap_flag);
        }
        jas_matrix_destroy(jas_data);
        jas_stream_close(jpcstream);
        jas_image_destroy(image);
        return 0;
    }
#endif
#ifdef USE_PNG
    else if (packing == 41) {		// png
        p = sec[5];
        reference = ieee2flt(p+11);
        bin_scale = Int_Power(2.0, int2(p+15));
        dec_scale = Int_Power(10.0, -int2(p+17));
        nbits = p[19];

	/* Constant field shortcut (see simple-packing branch). */
        if (nbits == 0) {
            tmp = reference*dec_scale;
            if (bitmap_flag == 255) {
                for (ii = 0; ii < ndata; ii++) {
                    data[ii] = tmp;
                }
                return 0;
            }
            if (bitmap_flag == 0 || bitmap_flag == 254) {
                mask_pointer = sec[6] + 6;
                ieee = sec[7]+5;
                mask = 0;
                for (ii = 0; ii < ndata; ii++) {
                    if ((ii & 7) == 0) mask = *mask_pointer++;
                    data[ii] = (mask & 128) ? tmp : UNDEFINED;
                    mask <<= 1;
                }
                return 0;
            }
            fatal_error("unknown bitmap", "");
        }

        if ((c = (unsigned char *) malloc(4*sizeof(char) * (size_t) ndata)) == NULL)
            fatal_error("unpk: allocation error", "");
        i = (int) dec_png_clone(sec[7]+5, &width, &height, (char *) c);
        if (i) fatal_error_i("unpk: png decode error %d",i);
        mask_pointer = (bitmap_flag == 255) ? NULL : sec[6] + 6;

        // check sizes
	/* NOTE(review): width*height is a signed int product compared against
	 * the unsigned ndata; for very large grids the product can overflow
	 * int before the comparison -- verify upstream grid-size limits. */
        if (mask_pointer == NULL) {
            if (ndata != width*height) fatal_error_i("png size mismatch w*h=%d", width*height);
        }
        else {
            if (ndata != width*height + missing_points(mask_pointer, GB2_Sec3_npts(sec)) )
                fatal_error("png size mismatch", "");
        }

        unpk_0(data, c, mask_pointer, nbits, ndata, reference,
            bin_scale,dec_scale);
        free(c);
        return 0;
    }
#endif
#ifdef USE_AEC
    else if (packing == 42) {		// aec
        p = sec[5];
        reference = ieee2flt(p+11);
        bin_scale = Int_Power(2.0, int2(p+15));
        dec_scale = Int_Power(10.0, -int2(p+17));
        nbits = p[19];

	/* Constant field shortcut (see simple-packing branch). */
        if (nbits == 0) {
            tmp = reference*dec_scale;
            if (bitmap_flag == 255) {
                for (ii = 0; ii < ndata; ii++) {
                    data[ii] = tmp;
                }
                return 0;
            }
            if (bitmap_flag == 0 || bitmap_flag == 254) {
                mask_pointer = sec[6] + 6;
                mask = 0;
                for (ii = 0; ii < ndata; ii++) {
                    if ((ii & 7) == 0) mask = *mask_pointer++;
                    data[ii] = (mask & 128) ? tmp : UNDEFINED;
                    mask <<= 1;
                }
                return 0;
            }
            fatal_error("unknown bitmap", "");
        }

	/* AEC decode parameters come straight from Section 5 octets. */
        strm.flags = (int) sec[5][21];
        strm.bits_per_sample = (int) sec[5][19];
        strm.block_size = (int) sec[5][22];
        strm.rsi = uint2(sec[5]+23);
        strm.next_in = sec[7]+5;
        strm.avail_in = uint4(sec[7]) - 5;

        numBitsNeeded = (int) sec[5][19];
        size = ((numBitsNeeded + 7)/8) * (size_t) ndata;
        if ((c = (unsigned char *) malloc(size)) == NULL)
            fatal_error("unpk: allocation error", "");
        strm.next_out = c;
        strm.avail_out = size;

        status = aec_buffer_decode(&strm);
        if (status != AEC_OK) fatal_error_i("unpk: aec decode error %d",status);

        mask_pointer = (bitmap_flag == 255) ? NULL : sec[6] + 6;
	/* Samples are byte-aligned after AEC decode, hence nbits rounded up
	 * to a whole number of bytes. */
        unpk_0(data, c, mask_pointer, ((nbits+7)/8)*8, ndata, reference,
            bin_scale, dec_scale);
        free(c);
        return 0;
    }
#endif
    fatal_error_i("packing type %d not supported", packing);
    return 1;
}
modifier_view.h
// ==========================================================================
//                 SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//     * Neither the name of Knut Reinert or the FU Berlin nor the names of
//       its contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: David Weese <david.weese@fu-berlin.de>
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// TODO(holtgrew): Split into modified_string_mod_view.h and modified_iterator_mod_view.h.
// TODO(holtgrew): Move out convert()

#ifndef SEQAN_MODIFIER_MODIFIER_VIEW_H_
#define SEQAN_MODIFIER_MODIFIER_VIEW_H_

namespace seqan {

// ==========================================================================
// Forwards
// ==========================================================================

// ==========================================================================
// Classes
// ==========================================================================

// --------------------------------------------------------------------------
// Class ModView
// --------------------------------------------------------------------------

/*!
 * @class ModViewModifiedIterator
 * @extends ModifiedIterator
 * @headerfile <seqan/modifier.h>
 *
 * @brief Transforms the character of a host using a custom functor.
 *
 * @signature template <typename THost, typename TFunctor>
 *            class ModifiedIterator<THost, ModView<TFunctor> >;
 *
 * @tparam THost The host iterator.
 * @tparam TFunctor A unary functor type.
 */

/*!
 * @class ModViewModifiedString
 * @extends ModifiedString
 * @headerfile <seqan/modifier.h>
 *
 * @brief Transforms the character of a host using a custom functor.
 *
 * @signature template <typename THost, typename TFunctor>
 *            class ModifiedString<THost, ModView<TFunctor> >;
 *
 * @tparam THost The host iterator.
 * @tparam TFunctor A unary functor type.
 */

/**
.Spec.ModView:
..summary:Transforms the characters of the $THost$ string/iterator using a custom function.
..cat:Modifier
..general:Class.ModifiedIterator
..general:Class.ModifiedString
..signature:ModifiedIterator<THost, ModView<TFunctor> >
..signature:ModifiedString<THost, ModView<TFunctor> >
..param.THost:Original string/iterator.
...type:Concept.RandomAccessIteratorConcept
..param.TFunctor:A unary function (see STL's $unary_function$).
...remarks:The argument type of $TFunctor$ must be $VALUE<THost>::Type$.
..remarks:The @Metafunction.Value@ type of this modifier is the result type of $TFunctor$.
..include:seqan/modifier.h
*/

// Tag selecting the "apply a functor per character" modifier behavior.
template <typename TFunctor>
struct ModView {};

// Cargo carried by ModView modifiers: just the functor instance.
template <typename TFunctor>
struct ModViewCargo
{
    TFunctor	func;
};

template <typename THost, typename TFunctor>
class ModifiedIterator<THost, ModView<TFunctor> >
{
public:
    typedef typename Cargo<ModifiedIterator>::Type TCargo_;

    Holder<THost, Simple> _host;
    TCargo_ _cargo;
    // Buffer for returning a reference to the transformed value; mutable so
    // const iterators can still materialize the result.
    mutable typename Value<ModifiedIterator>::Type tmp_value;

    ModifiedIterator() : _host(), _cargo()
    {}

    explicit
    ModifiedIterator(THost const & host) : _host(host), _cargo()
    {}

    ModifiedIterator(THost const & host, TFunctor const & functor) : _host(host), _cargo()
    {
        cargo(*this).func = functor;
    }

    explicit
    ModifiedIterator(TFunctor const & functor) : _host(), _cargo()
    {
        cargo(*this).func = functor;
    }
};

// --------------------------------------------------------------------------
// Class ModifiedString
// --------------------------------------------------------------------------

template <typename THost, typename TFunctor>
class ModifiedString<THost, ModView<TFunctor> >
{
public:
    typedef typename Pointer_<THost>::Type       THostPointer_;
    typedef typename Cargo<ModifiedString>::Type TCargo_;
    // Innermost underlying host type, used by the SFINAE constructors below
    // to allow construction from a nested host directly.
    typedef typename InnermostHost_<ModifiedString>::Type TInnermostHost_;

    THostPointer_ _host;
    TCargo_ _cargo;
    // Buffer for returning a reference to the transformed value.
    mutable typename Value<ModifiedString>::Type tmp_value;

    // Default constructor.
    ModifiedString() : _host(), _cargo()
    {}

    // Construct with the actual host.
    explicit
    ModifiedString(THost & host) : _host(_toPointer(host)), _cargo(), tmp_value()
    {}

    // Construct with the functor.
    explicit
    ModifiedString(TFunctor const & functor) : _host(), _cargo(), tmp_value()
    {
        cargo(*this).func = functor;
    }

    // Constructor for creating a ModifiedString with const host with a non-const host.
    template <typename THost_>
    explicit
    ModifiedString(THost_ const & host,
                   SEQAN_CTOR_ENABLE_IF(IsSameType<THost, THost_>)) :
            _host(_toPointer(host)), _cargo(), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }

    // Construct with the actual host; variant with functor.
    ModifiedString(THost & host, TFunctor const & functor) :
            _host(_toPointer(host)), _cargo(), tmp_value()
    {
        cargo(*this).func = functor;
    }

    // Constructor for creating a ModifiedString with const host with a non-const host; variant with functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ const & host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(IsSameType<THost, THost_>)) :
            _host(_toPointer(host)), _cargo(), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }

    // Constructor for innermost type; hand down to _host which is a ModifiedString itself.  Non-const variant.
    template <typename THost_>
    explicit
    ModifiedString(THost_ & host,
                   SEQAN_CTOR_ENABLE_IF(And<Not<IsSameType<TInnermostHost_, THost> >,
                                            IsSameType<TInnermostHost_, THost_> >)) :
            _host(host), _cargo(), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }

    // Constructor for innermost type; hand down to _host which is a ModifiedString itself.  Const variant.
    template <typename THost_>
    explicit
    ModifiedString(THost_ const & host,
                   SEQAN_CTOR_ENABLE_IF(And<Not<IsSameType<TInnermostHost_, THost> >,
                                            IsSameType<TInnermostHost_, THost_> >)) :
            _host(host), _cargo(), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }

    // Constructor for innermost type; hand down to _host which is a ModifiedString itself.  Non-const variant with
    // functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ & host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(And<Not<IsSameType<TInnermostHost_, THost> >,
                                            IsSameType<TInnermostHost_, THost_> >)) :
            _host(host), _cargo(), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }

    // Constructor for innermost type; hand down to _host which is a ModifiedString itself.  Const variant with functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ const & host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(And<Not<IsSameType<TInnermostHost_, THost> >,
                                            IsSameType<TInnermostHost_, THost_> >)) :
            _host(host), _cargo(), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }

    template <typename TPos>
    inline typename Reference<ModifiedString>::Type
    operator[](TPos pos)
    {
        return value(*this, pos);
    }

    template <typename TPos>
    inline typename Reference<ModifiedString const>::Type
    operator[](TPos pos) const
    {
        return value(*this, pos);
    }
};

// ==========================================================================
// Metafunctions
// ==========================================================================

// --------------------------------------------------------------------------
// Metafunction Cargo                                      [ModifiedIterator]
// --------------------------------------------------------------------------

template <typename THost, typename TFunctor>
struct Cargo<ModifiedIterator<THost, ModView<TFunctor> > >
{
    typedef ModViewCargo<TFunctor>	Type;
};

// --------------------------------------------------------------------------
// Metafunction Value                                      [ModifiedIterator]
// --------------------------------------------------------------------------

// The value type is the functor's result type, stripped of const.
template <typename THost, typename TFunctor>
struct Value<ModifiedIterator<THost, ModView<TFunctor> > >
{
    typedef typename TFunctor::result_type			TResult_;
    typedef typename RemoveConst_<TResult_>::Type	Type;
};

// --------------------------------------------------------------------------
// Metafunction GetValue                                   [ModifiedIterator]
// --------------------------------------------------------------------------

template <typename THost, typename TFunctor>
struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > > :
            Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};

// --------------------------------------------------------------------------
// Metafunction Reference                                  [ModifiedIterator]
// --------------------------------------------------------------------------

template <typename THost, typename TFunctor>
struct Reference<ModifiedIterator<THost, ModView<TFunctor> > >
{
    typedef typename Value<ModifiedIterator<THost, ModView<TFunctor> > >::Type & Type;
};

// --------------------------------------------------------------------------
// Metafunction Cargo                                        [ModifiedString]
// --------------------------------------------------------------------------

template <typename THost, typename TFunctor>
struct Cargo< ModifiedString<THost, ModView<TFunctor> > >
{
    typedef ModViewCargo<TFunctor>	Type;
};

// ==========================================================================
// Functions
// ==========================================================================

// --------------------------------------------------------------------------
// Function value()                                        [ModifiedIterator]
// --------------------------------------------------------------------------

// Applies the functor to the host's current value; the result is cached in
// tmp_value so a reference can be returned.
template <typename THost, typename TFunctor>
inline typename Reference<ModifiedIterator<THost, ModView<TFunctor> > >::Type
value(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    me.tmp_value = cargo(me).func(getValue(host(me)));
    return me.tmp_value;
}

template <typename THost, typename TFunctor>
inline typename Reference<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
value(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    me.tmp_value = cargo(me).func(getValue(host(me)));
    return me.tmp_value;
}

// --------------------------------------------------------------------------
// Function getValue()                                     [ModifiedIterator]
// --------------------------------------------------------------------------

template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    return cargo(me).func(getValue(host(me)));
}

template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    return cargo(me).func(getValue(host(me)));
}

// --------------------------------------------------------------------------
// Function value()                                          [ModifiedString]
// --------------------------------------------------------------------------

template <typename THost, typename TFunctor, typename TPos>
inline typename Reference<ModifiedString<THost, ModView<TFunctor> > >::Type
value(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    me.tmp_value = cargo(me).func(getValue(host(me), pos));
    return me.tmp_value;
}

template <typename THost, typename TFunctor, typename TPos>
inline typename Reference<ModifiedString<THost, ModView<TFunctor> > const>::Type
value(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    me.tmp_value = cargo(me).func(getValue(host(me), pos));
    return me.tmp_value;
}

// --------------------------------------------------------------------------
// Function getValue()                                       [ModifiedString]
// --------------------------------------------------------------------------

template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
getValue(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    return cargo(me).func(getValue(host(me), pos));
}

template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    return cargo(me).func(getValue(host(me), pos));
}

// --------------------------------------------------------------------------
// Function convert()
// --------------------------------------------------------------------------

// In-place application of functor F to every element of sequence.
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type	TPos;
    typedef typename MakeSigned_<TPos>::Type	TSignedPos;

    #pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence, Standard>::Type	TIter;

    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}

// Overload for const sequences (e.g. proxies whose elements are assignable
// through a const reference).
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence const & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type	TPos;
    typedef typename MakeSigned_<TPos>::Type	TSignedPos;

    #pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence const, Standard>::Type	TIter;

    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}

}  // namespace seqan

#endif  // SEQAN_MODIFIER_MODIFIER_VIEW_H_
VolumetricDilatedMaxPooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricDilatedMaxPooling.c"
#else

/*
 * 3D dilated max pooling over a (C,T,H,W) or batched (N,C,T,H,W) tensor.
 * Validates kernel/stride/padding/dilation arguments, computes the output
 * extents (ceil or floor mode), and checks gradOutput/indices shapes when
 * those tensors are supplied (NULL skips the respective check).
 */
static inline void THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         THIndexTensor *indices,
                         int kT, int kW, int kH,
                         int dT, int dW, int dH,
                         int pT, int pW, int pH,
                         int dilationT, int dilationW, int dilationH,
                         bool ceilMode)
{
  int ndim = input->dim();
  /* Dimension indices for non-batch (4D) layout; shifted by one for 5D. */
  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;

  THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
             "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
             kT, kH, kW);
  THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
             "stride should be greater than zero, but got dT: %d dH: %d dW: %d",
             dT, dH, dW);
  THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 14,
             "dilation should be greater than 0, but got dilationT: %d dilationH: %d dilationW: %d",
             dilationT, dilationH, dilationW);

  THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
                "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");

  if (input->dim() == 5)
  {
    /* batch mode: all spatial dims shift right by one */
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  THArgCheck(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH, 2,
             "pad should be smaller than half of kernel size, but got "
             "kT: %d kW: %d, kH: %d, padT: %d, padW: %d, padH: %d",
             kT, kW, kH, pT, pW, pH);

  /* sizes */
  nslices = input->size(dimN);
  itime = input->size(dimt);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  /* Effective kernel extent along an axis is dilation*(k-1)+1. */
  if (ceilMode)
  {
    otime = (int)(ceil((float)(itime - (dilationT * (kT - 1) + 1) + 2*pT) / dT)) + 1;
    oheight = (int)(ceil((float)(iheight - (dilationH * (kH - 1) + 1) + 2*pH) / dH)) + 1;
    owidth = (int)(ceil((float)(iwidth - (dilationW * (kW - 1) + 1) + 2*pW) / dW)) + 1;
  }
  else
  {
    otime = (int)(floor((float)(itime - (dilationT * (kT - 1) + 1) + 2*pT) / dT)) + 1;
    oheight = (int)(floor((float)(iheight - (dilationH * (kH - 1) + 1) + 2*pH) / dH)) + 1;
    owidth = (int)(floor((float)(iwidth - (dilationW * (kW - 1) + 1) + 2*pW) / dW)) + 1;
  }

  if (pT || pW || pH)
  {
    // ensure that the last pooling starts inside the image
    if ((otime - 1)*dT >= itime + pT)
      --otime;
    if ((oheight - 1)*dH >= iheight + pH)
      --oheight;
    if ((owidth - 1)*dW >= iwidth + pW)
      --owidth;
  }

  if (otime < 1 || owidth < 1 || oheight < 1)
    THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small",
            nslices,itime,iheight,iwidth,nslices,otime,oheight,owidth);

  if (gradOutput != NULL)
  {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
  }
  if (indices != NULL)
  {
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimN, nslices);
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimt, otime);
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimh, oheight);
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimw, owidth);
  }
}

/*
 * Forward pass over a single (nslices, itime, iheight, iwidth) frame.
 * For each output cell, scans the dilated window, writes the max value to
 * output_p and the flat argmax index (plus TH_INDEX_BASE) to indz_p.
 * Parallelized over feature slices.
 */
static void THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          THIndex_t *indz_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int dilationT, int dilationW, int dilationH)
{
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    /* loop over output */
    int64_t i, j, ti;
    real *ip = input_p + k * itime * iwidth * iheight;
    for (ti = 0; ti < otime; ti++)
    {
      for (i = 0; i < oheight; i++)
      {
        for (j = 0; j < owidth; j++)
        {
          /* local pointers */
          int64_t start_t = ti * dT - pT;
          int64_t start_h = i * dH - pH;
          int64_t start_w = j * dW - pW;

          /* NOTE(review): fminf operates in float; for extents beyond 2^24
             the int64 -> float conversion can lose precision — confirm
             upstream accepts this. */
          int64_t end_t = fminf(start_t + (kT - 1) * dilationT + 1, itime);
          int64_t end_h = fminf(start_h + (kH - 1) * dilationH + 1, iheight);
          int64_t end_w = fminf(start_w + (kW - 1) * dilationW + 1, iwidth);

          /* advance window start to the first in-bounds dilated tap */
          while(start_t < 0)
            start_t += dilationT;
          while(start_h < 0)
            start_h += dilationH;
          while(start_w < 0)
            start_w += dilationW;

          real *op = output_p + k * otime * owidth * oheight
            + ti * owidth * oheight + i * owidth + j;
          THIndex_t *indzp = indz_p + k * otime * owidth * oheight
            + ti * owidth * oheight + i * owidth + j;

          /* compute local max: */
          int64_t maxindex = -1;
          real maxval = -THInf;
          int64_t x,y,z;
          int64_t index = 0;

          for (z = start_t; z < end_t; z += dilationT)
          {
            for (y = start_h; y < end_h; y += dilationH)
            {
              for (x = start_w; x < end_w; x += dilationW)
              {
                index = z * iwidth * iheight + y * iwidth + x;
                real val = ip[index];
                /* NaN is propagated: any NaN in the window becomes the max */
                if ((val > maxval) || std::isnan(val))
                {
                  maxval = val;
                  maxindex = index;
                }
              }
            }
          }

          // store location of max
          *indzp = maxindex + TH_INDEX_BASE;

          /* set output to local max */
          *op = maxval;
        }
      }
    }
  }
}

/*
 * Forward entry point: validates shapes, resizes output/indices, and runs
 * the per-frame kernel (batched over dim 0 in 5D mode).
 */
void THNN_(VolumetricDilatedMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int dilationT, int dilationW, int dilationH,
          bool ceilMode)
{
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;
  real *input_data;
  real *output_data;
  THIndex_t *indices_data;

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  if (input->dim() == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
        state, input, NULL, NULL,
        kT, kW, kH, dT, dW, dH,
        pT, pW, pH, dilationT, dilationW, dilationH, ceilMode);

  /* sizes */
  nslices = input->size(dimN);
  itime = input->size(dimt);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  /* same output-extent computation as shapeCheck (kept in sync manually) */
  if (ceilMode)
  {
    otime = (int)(ceil((float)(itime - (dilationT * (kT - 1) + 1) + 2*pT) / dT)) + 1;
    oheight = (int)(ceil((float)(iheight - (dilationH * (kH - 1) + 1) + 2*pH) / dH)) + 1;
    owidth = (int)(ceil((float)(iwidth - (dilationW * (kW - 1) + 1) + 2*pW) / dW)) + 1;
  }
  else
  {
    otime = (int)(floor((float)(itime - (dilationT * (kT - 1) + 1) + 2*pT) / dT)) + 1;
    oheight = (int)(floor((float)(iheight - (dilationH * (kH - 1) + 1) + 2*pH) / dH)) + 1;
    owidth = (int)(floor((float)(iwidth - (dilationW * (kW - 1) + 1) + 2*pW) / dW)) + 1;
  }

  if (pT || pW || pH)
  {
    // ensure that the last pooling starts inside the image
    if ((otime - 1)*dT >= itime + pT)
      --otime;
    if ((oheight - 1)*dH >= iheight + pH)
      --oheight;
    if ((owidth - 1)*dW >= iwidth + pW)
      --owidth;
  }

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  if (input->dim() == 4) /* non-batch mode */
  {
    /* resize output */
    THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
    /* indices will contain ti,i,j uchar locations packed into float/double */
    THIndexTensor_(resize4d)(indices, nslices, otime, oheight, owidth);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
      input_data, output_data,
      indices_data,
      nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH,
      dilationT, dilationW, dilationH
    );
  }
  else /* batch mode */
  {
    int64_t p;
    int64_t nBatch = input->size(0);

    int64_t istride = nslices * itime * iwidth * iheight;
    int64_t ostride = nslices * otime * owidth * oheight;

    /* resize output */
    THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);
    /* indices will contain ti,i,j locations for each output point */
    THIndexTensor_(resize5d)(indices, nBatch, nslices, otime, oheight, owidth);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

#pragma omp parallel for private(p)
    for (p=0; p < nBatch; p++)
    {
      THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
        input_data + p * istride,
        output_data + p * ostride,
        indices_data + p * ostride,
        nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH,
        dilationT, dilationW, dilationH
      );
    }
  }

  /* cleanup */
  THTensor_(free)(input);
}

/*
 * Backward pass over one frame: scatter each gradOutput element onto the
 * input position recorded in the forward-pass index tensor.
 */
static void THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          THIndex_t *indz_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int dilationT, int dilationW, int dilationH)
{
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    real *gradInput_p_k = gradInput_p + k * itime * iwidth * iheight;
    real *gradOutput_p_k = gradOutput_p + k * otime * owidth * oheight;
    THIndex_t *indz_p_k = indz_p + k * otime * owidth * oheight;

    /* calculate max points */
    int64_t ti, i, j;
    for (ti = 0; ti < otime; ti++)
    {
      for (i = 0; i < oheight; i++)
      {
        for (j = 0; j < owidth; j++)
        {
          /* retrieve position of max */
          int64_t index = ti * oheight * owidth + i * owidth + j;
          int64_t maxp = indz_p_k[index] - TH_INDEX_BASE;

          /* -1 means the forward window was empty; nothing to propagate */
          if (maxp != -1) {
            /* update gradient */
            gradInput_p_k[maxp] += gradOutput_p_k[index];
          }
        }
      }
    }
  }
}

/*
 * Backward entry point: validates shapes (including gradOutput/indices),
 * zero-fills gradInput, then runs the per-frame scatter (batched in 5D).
 */
void THNN_(VolumetricDilatedMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int dilationT, int dilationW, int dilationH,
          bool ceilMode)
{
  int nslices;
  int itime;
  int iheight;
  int iwidth;
  int otime;
  int oheight;
  int owidth;
  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
        state, input, gradOutput, indices,
        kT, kW, kH, dT, dW, dH,
        pT, pW, pH, dilationT, dilationW, dilationH, ceilMode);

  // TODO: gradOutput shape check
  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size(dimN);
  itime = input->size(dimt);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);
  otime = gradOutput->size(dimt);
  oheight = gradOutput->size(dimh);
  owidth = gradOutput->size(dimw);

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->dim() == 4) /* non-batch mode*/
  {
    THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
      gradInput_data, gradOutput_data,
      indices_data,
      nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      dT, dW, dH,
      pT, pW, pH,
      dilationT, dilationW, dilationH
    );
  }
  else /* batch mode */
  {
    int64_t p;
    int64_t nBatch = input->size(0);

    int64_t istride = nslices * itime * iwidth * iheight;
    int64_t ostride = nslices * otime * owidth * oheight;

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++)
    {
      THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
        gradInput_data + p * istride,
        gradOutput_data + p * ostride,
        indices_data + p * ostride,
        nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        dT, dW, dH,
        pT, pW, pH,
        dilationT, dilationW, dilationH
      );
    }
  }

  /* cleanup */
  THTensor_(free)(gradOutput);
}

#endif
image.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/image.c" #else #undef MAX #define MAX(a,b) ( ((a)>(b)) ? (a) : (b) ) #undef MIN #define MIN(a,b) ( ((a)<(b)) ? (a) : (b) ) #undef TAPI #define TAPI __declspec(dllimport) #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #undef temp_t #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #define temp_t real #else #define temp_t float #endif static inline real image_(FromIntermediate)(temp_t x) { #ifdef TH_REAL_IS_BYTE x += 0.5; if( x <= 0 ) return 0; if( x >= 255 ) return 255; #endif return x; } static void image_(Main_op_validate)( lua_State *L, THTensor *Tsrc, THTensor *Tdst){ long src_depth = 1; long dst_depth = 1; luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional"); luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional"); if(Tdst->nDimension == 3) dst_depth = Tdst->size[0]; if(Tsrc->nDimension == 3) src_depth = Tsrc->size[0]; if( (Tdst->nDimension==3 && ( src_depth!=dst_depth)) || (Tdst->nDimension!=Tsrc->nDimension) ) luaL_error(L, "image.scale: src and dst depths do not match"); if( Tdst->nDimension==3 && ( src_depth!=dst_depth) ) luaL_error(L, "image.scale: src and dst depths do not match"); } static long image_(Main_op_stride)( THTensor *T,int i){ if (T->nDimension == 2) { if (i == 0) return 0; else return T->stride[i-1]; } return T->stride[i]; } static long image_(Main_op_depth)( THTensor *T){ if(T->nDimension == 3) return T->size[0]; /* rgb or rgba */ return 1; /* greyscale */ } static void image_(Main_scaleLinear_rowcol)(THTensor *Tsrc, THTensor *Tdst, long src_start, long dst_start, long src_stride, long dst_stride, long src_len, long dst_len ) { real *src= THTensor_(data)(Tsrc); real *dst= THTensor_(data)(Tdst); if ( dst_len > src_len ){ long di; float si_f; long si_i; float scale = (float)(src_len - 1) / (dst_len - 1); if ( src_len == 1 ) { for( di = 0; di < dst_len - 1; di++ ) { long 
dst_pos = dst_start + di*dst_stride; dst[dst_pos] = src[ src_start ]; } } else { for( di = 0; di < dst_len - 1; di++ ) { long dst_pos = dst_start + di*dst_stride; si_f = di * scale; si_i = (long)si_f; si_f -= si_i; dst[dst_pos] = image_(FromIntermediate)( (1 - si_f) * src[ src_start + si_i * src_stride ] + si_f * src[ src_start + (si_i + 1) * src_stride ]); } } dst[ dst_start + (dst_len - 1) * dst_stride ] = src[ src_start + (src_len - 1) * src_stride ]; } else if ( dst_len < src_len ) { long di; long si0_i = 0; float si0_f = 0; long si1_i; float si1_f; long si; float scale = (float)src_len / dst_len; float acc, n; for( di = 0; di < dst_len; di++ ) { si1_f = (di + 1) * scale; si1_i = (long)si1_f; si1_f -= si1_i; acc = (1 - si0_f) * src[ src_start + si0_i * src_stride ]; n = 1 - si0_f; for( si = si0_i + 1; si < si1_i; si++ ) { acc += src[ src_start + si * src_stride ]; n += 1; } if( si1_i < src_len ) { acc += si1_f * src[ src_start + si1_i*src_stride ]; n += si1_f; } dst[ dst_start + di*dst_stride ] = image_(FromIntermediate)(acc / n); si0_i = si1_i; si0_f = si1_f; } } else { long i; for( i = 0; i < dst_len; i++ ) dst[ dst_start + i*dst_stride ] = src[ src_start + i*src_stride ]; } } static inline temp_t image_(Main_cubicInterpolate)(temp_t p0, temp_t p1, temp_t p2, temp_t p3, temp_t x) { temp_t a0 = p1; temp_t a1 = p2 - p0; temp_t a2 = 2 * p0 - 5 * p1 + 4 * p2 - p3; temp_t a3 = 3 * (p1 - p2) + p3 - p0; return a0 + 0.5 * x * (a1 + x * (a2 + x * a3)); } static void image_(Main_scaleCubic_rowcol)(THTensor *Tsrc, THTensor *Tdst, long src_start, long dst_start, long src_stride, long dst_stride, long src_len, long dst_len ) { real *src= THTensor_(data)(Tsrc); real *dst= THTensor_(data)(Tdst); if ( dst_len == src_len ){ long i; for( i = 0; i < dst_len; i++ ) dst[ dst_start + i*dst_stride ] = src[ src_start + i*src_stride ]; } else if ( src_len == 1 ) { long i; for( i = 0; i < dst_len - 1; i++ ) { long dst_pos = dst_start + i*dst_stride; dst[dst_pos] = src[ src_start ]; } 
} else { long di; float si_f; long si_i; float scale; if (dst_len == 1) scale = (float)(src_len - 1); else scale = (float)(src_len - 1) / (dst_len - 1); for( di = 0; di < dst_len - 1; di++ ) { long dst_pos = dst_start + di*dst_stride; si_f = di * scale; si_i = (long)si_f; si_f -= si_i; temp_t p0; temp_t p1 = src[ src_start + si_i * src_stride ]; temp_t p2 = src[ src_start + (si_i + 1) * src_stride ]; temp_t p3; if (si_i > 0) { p0 = src[ src_start + (si_i - 1) * src_stride ]; } else { p0 = 2 * p1 - p2; } if (si_i + 2 < src_len) { p3 = src[ src_start + (si_i + 2) * src_stride ]; } else { p3 = 2 * p2 - p1; } temp_t value = image_(Main_cubicInterpolate)(p0, p1, p2, p3, si_f); dst[dst_pos] = image_(FromIntermediate)(value); } dst[ dst_start + (dst_len - 1) * dst_stride ] = src[ src_start + (src_len - 1) * src_stride ]; } } static int image_(Main_scaleBilinear)(lua_State *L) { THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor); THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor); THTensor *Ttmp; long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height; long src_stride0, src_stride1, src_stride2, src_width, src_height; long tmp_stride0, tmp_stride1, tmp_stride2, tmp_width, tmp_height; long i, j, k; image_(Main_op_validate)(L, Tsrc,Tdst); int ndims; if (Tdst->nDimension == 3) ndims = 3; else ndims = 2; Ttmp = THTensor_(newWithSize2d)(Tsrc->size[ndims-2], Tdst->size[ndims-1]); dst_stride0= image_(Main_op_stride)(Tdst,0); dst_stride1= image_(Main_op_stride)(Tdst,1); dst_stride2= image_(Main_op_stride)(Tdst,2); src_stride0= image_(Main_op_stride)(Tsrc,0); src_stride1= image_(Main_op_stride)(Tsrc,1); src_stride2= image_(Main_op_stride)(Tsrc,2); tmp_stride0= image_(Main_op_stride)(Ttmp,0); tmp_stride1= image_(Main_op_stride)(Ttmp,1); tmp_stride2= image_(Main_op_stride)(Ttmp,2); dst_width= Tdst->size[ndims-1]; dst_height= Tdst->size[ndims-2]; src_width= Tsrc->size[ndims-1]; src_height= Tsrc->size[ndims-2]; tmp_width= Ttmp->size[1]; tmp_height= Ttmp->size[0]; 
for(k=0;k<image_(Main_op_depth)(Tsrc);k++) { /* compress/expand rows first */ for(j = 0; j < src_height; j++) { image_(Main_scaleLinear_rowcol)(Tsrc, Ttmp, 0*src_stride2+j*src_stride1+k*src_stride0, 0*tmp_stride2+j*tmp_stride1+k*tmp_stride0, src_stride2, tmp_stride2, src_width, tmp_width ); } /* then columns */ for(i = 0; i < dst_width; i++) { image_(Main_scaleLinear_rowcol)(Ttmp, Tdst, i*tmp_stride2+0*tmp_stride1+k*tmp_stride0, i*dst_stride2+0*dst_stride1+k*dst_stride0, tmp_stride1, dst_stride1, tmp_height, dst_height ); } } THTensor_(free)(Ttmp); return 0; } static int image_(Main_scaleBicubic)(lua_State *L) { THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor); THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor); THTensor *Ttmp; long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height; long src_stride0, src_stride1, src_stride2, src_width, src_height; long tmp_stride0, tmp_stride1, tmp_stride2, tmp_width, tmp_height; long i, j, k; image_(Main_op_validate)(L, Tsrc,Tdst); int ndims; if (Tdst->nDimension == 3) ndims = 3; else ndims = 2; Ttmp = THTensor_(newWithSize2d)(Tsrc->size[ndims-2], Tdst->size[ndims-1]); dst_stride0= image_(Main_op_stride)(Tdst,0); dst_stride1= image_(Main_op_stride)(Tdst,1); dst_stride2= image_(Main_op_stride)(Tdst,2); src_stride0= image_(Main_op_stride)(Tsrc,0); src_stride1= image_(Main_op_stride)(Tsrc,1); src_stride2= image_(Main_op_stride)(Tsrc,2); tmp_stride0= image_(Main_op_stride)(Ttmp,0); tmp_stride1= image_(Main_op_stride)(Ttmp,1); tmp_stride2= image_(Main_op_stride)(Ttmp,2); dst_width= Tdst->size[ndims-1]; dst_height= Tdst->size[ndims-2]; src_width= Tsrc->size[ndims-1]; src_height= Tsrc->size[ndims-2]; tmp_width= Ttmp->size[1]; tmp_height= Ttmp->size[0]; for(k=0;k<image_(Main_op_depth)(Tsrc);k++) { /* compress/expand rows first */ for(j = 0; j < src_height; j++) { image_(Main_scaleCubic_rowcol)(Tsrc, Ttmp, 0*src_stride2+j*src_stride1+k*src_stride0, 0*tmp_stride2+j*tmp_stride1+k*tmp_stride0, src_stride2, tmp_stride2, 
src_width, tmp_width ); } /* then columns */ for(i = 0; i < dst_width; i++) { image_(Main_scaleCubic_rowcol)(Ttmp, Tdst, i*tmp_stride2+0*tmp_stride1+k*tmp_stride0, i*dst_stride2+0*dst_stride1+k*dst_stride0, tmp_stride1, dst_stride1, tmp_height, dst_height ); } } THTensor_(free)(Ttmp); return 0; } static int image_(Main_scaleSimple)(lua_State *L) { THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor); THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor); real *src, *dst; long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth; long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth; long i, j, k; float scx, scy; luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "image.scale: src not 2 or 3 dimensional"); luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "image.scale: dst not 2 or 3 dimensional"); src= THTensor_(data)(Tsrc); dst= THTensor_(data)(Tdst); dst_stride0 = 0; dst_stride1 = Tdst->stride[Tdst->nDimension-2]; dst_stride2 = Tdst->stride[Tdst->nDimension-1]; dst_depth = 0; dst_height = Tdst->size[Tdst->nDimension-2]; dst_width = Tdst->size[Tdst->nDimension-1]; if(Tdst->nDimension == 3) { dst_stride0 = Tdst->stride[0]; dst_depth = Tdst->size[0]; } src_stride0 = 0; src_stride1 = Tsrc->stride[Tsrc->nDimension-2]; src_stride2 = Tsrc->stride[Tsrc->nDimension-1]; src_depth = 0; src_height = Tsrc->size[Tsrc->nDimension-2]; src_width = Tsrc->size[Tsrc->nDimension-1]; if(Tsrc->nDimension == 3) { src_stride0 = Tsrc->stride[0]; src_depth = Tsrc->size[0]; } if( (Tdst->nDimension==3 && ( src_depth!=dst_depth)) || (Tdst->nDimension!=Tsrc->nDimension) ) { printf("image.scale:%d,%d,%ld,%ld\n",Tsrc->nDimension,Tdst->nDimension,src_depth,dst_depth); luaL_error(L, "image.scale: src and dst depths do not match"); } if( Tdst->nDimension==3 && ( src_depth!=dst_depth) ) luaL_error(L, "image.scale: src and dst depths do not match"); /* printf("%d,%d -> %d,%d\n",src_width,src_height,dst_width,dst_height); */ 
scx=((float)src_width)/((float)dst_width); scy=((float)src_height)/((float)dst_height); #pragma omp parallel for private(j, i, k) for(j = 0; j < dst_height; j++) { for(i = 0; i < dst_width; i++) { float val = 0.0; long ii=(long) (((float)i)*scx); long jj=(long) (((float)j)*scy); if(ii>src_width-1) ii=src_width-1; if(jj>src_height-1) jj=src_height-1; if(Tsrc->nDimension==2) { val=src[ii*src_stride2+jj*src_stride1]; dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val); } else { for(k=0;k<src_depth;k++) { val=src[ii*src_stride2+jj*src_stride1+k*src_stride0]; dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val); } } } } return 0; } static int image_(Main_rotate)(lua_State *L) { THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor); THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor); float theta = luaL_checknumber(L, 3); float cos_theta, sin_theta; real *src, *dst; long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth; long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth; long i, j, k; float xc, yc; float id,jd; long ii,jj; luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional"); luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional"); src= THTensor_(data)(Tsrc); dst= THTensor_(data)(Tdst); if (dst == src) { luaL_error(L, "image.rotate: in-place rotate not supported"); } dst_stride0 = 0; dst_stride1 = Tdst->stride[Tdst->nDimension-2]; dst_stride2 = Tdst->stride[Tdst->nDimension-1]; dst_depth = 0; dst_height = Tdst->size[Tdst->nDimension-2]; dst_width = Tdst->size[Tdst->nDimension-1]; if(Tdst->nDimension == 3) { dst_stride0 = Tdst->stride[0]; dst_depth = Tdst->size[0]; } src_stride0 = 0; src_stride1 = Tsrc->stride[Tsrc->nDimension-2]; src_stride2 = Tsrc->stride[Tsrc->nDimension-1]; src_depth = 0; src_height = Tsrc->size[Tsrc->nDimension-2]; src_width = Tsrc->size[Tsrc->nDimension-1]; 
if(Tsrc->nDimension == 3) { src_stride0 = Tsrc->stride[0]; src_depth = Tsrc->size[0]; } if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) luaL_error(L, "image.rotate: src and dst depths do not match"); if( (Tsrc->nDimension!=Tdst->nDimension) ) luaL_error(L, "image.rotate: src and dst depths do not match"); xc = (src_width-1)/2.0; yc = (src_height-1)/2.0; sin_theta = sin(theta); cos_theta = cos(theta); for(j = 0; j < dst_height; j++) { jd=j; for(i = 0; i < dst_width; i++) { float val = -1; id= i; ii = (long) round(cos_theta*(id-xc) - sin_theta*(jd-yc) + xc); jj = (long) round(cos_theta*(jd-yc) + sin_theta*(id-xc) + yc); /* rotated corners are blank */ if(ii>src_width-1) val=0; if(jj>src_height-1) val=0; if(ii<0) val=0; if(jj<0) val=0; if(Tsrc->nDimension==2) { if(val==-1) val=src[ii*src_stride2+jj*src_stride1]; dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val); } else { int do_copy=0; if(val==-1) do_copy=1; for(k=0;k<src_depth;k++) { if(do_copy) val=src[ii*src_stride2+jj*src_stride1+k*src_stride0]; dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val); } } } } return 0; } static int image_(Main_rotateBilinear)(lua_State *L) { THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor); THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor); float theta = luaL_checknumber(L, 3); real *src, *dst; long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth; long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth; long i, j, k; float xc, yc; float id,jd; long ii_0, ii_1, jj_0, jj_1; luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional"); luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional"); src= THTensor_(data)(Tsrc); dst= THTensor_(data)(Tdst); if (dst == src) { luaL_error(L, "image.rotate: in-place rotate not supported"); } dst_stride0 = 0; dst_stride1 = 
Tdst->stride[Tdst->nDimension-2]; dst_stride2 = Tdst->stride[Tdst->nDimension-1]; dst_depth = 0; dst_height = Tdst->size[Tdst->nDimension-2]; dst_width = Tdst->size[Tdst->nDimension-1]; if(Tdst->nDimension == 3) { dst_stride0 = Tdst->stride[0]; dst_depth = Tdst->size[0]; } src_stride0 = 0; src_stride1 = Tsrc->stride[Tsrc->nDimension-2]; src_stride2 = Tsrc->stride[Tsrc->nDimension-1]; src_depth = 0; src_height = Tsrc->size[Tsrc->nDimension-2]; src_width = Tsrc->size[Tsrc->nDimension-1]; if(Tsrc->nDimension == 3) { src_stride0 = Tsrc->stride[0]; src_depth = Tsrc->size[0]; } if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) luaL_error(L, "image.rotate: src and dst depths do not match"); if( (Tsrc->nDimension!=Tdst->nDimension) ) luaL_error(L, "image.rotate: src and dst depths do not match"); xc = (src_width-1)/2.0; yc = (src_height-1)/2.0; for(j = 0; j < dst_height; j++) { jd=j; for(i = 0; i < dst_width; i++) { float val = -1; temp_t ri, rj, wi, wj; id= i; ri = cos(theta)*(id-xc)-sin(theta)*(jd-yc); rj = cos(theta)*(jd-yc)+sin(theta)*(id-xc); ii_0 = (long)floor(ri+xc); ii_1 = ii_0 + 1; jj_0 = (long)floor(rj+yc); jj_1 = jj_0 + 1; wi = ri+xc-ii_0; wj = rj+yc-jj_0; /* default to the closest value when interpolating on image boundaries (either image pixel or 0) */ if(ii_1==src_width && wi<0.5) ii_1 = ii_0; else if(ii_1>=src_width) val=0; if(jj_1==src_height && wj<0.5) jj_1 = jj_0; else if(jj_1>=src_height) val=0; if(ii_0==-1 && wi>0.5) ii_0 = ii_1; else if(ii_0<0) val=0; if(jj_0==-1 && wj>0.5) jj_0 = jj_1; else if(jj_0<0) val=0; if(Tsrc->nDimension==2) { if(val==-1) val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1] + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1] + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1] + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1]; dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val); } else { int do_copy=0; if(val==-1) do_copy=1; for(k=0;k<src_depth;k++) { 
if(do_copy) { val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0] + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0] + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0] + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0]; } dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val); } } } } return 0; } static int image_(Main_polar)(lua_State *L) { THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor); THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor); float doFull = luaL_checknumber(L, 3); real *src, *dst; long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth; long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth; long i, j, k; float id, jd, a, r, m, midY, midX; long ii,jj; luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional"); luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional"); src= THTensor_(data)(Tsrc); dst= THTensor_(data)(Tdst); dst_stride0 = 0; dst_stride1 = Tdst->stride[Tdst->nDimension-2]; dst_stride2 = Tdst->stride[Tdst->nDimension-1]; dst_depth = 0; dst_height = Tdst->size[Tdst->nDimension-2]; dst_width = Tdst->size[Tdst->nDimension-1]; if(Tdst->nDimension == 3) { dst_stride0 = Tdst->stride[0]; dst_depth = Tdst->size[0]; } src_stride0 = 0; src_stride1 = Tsrc->stride[Tsrc->nDimension-2]; src_stride2 = Tsrc->stride[Tsrc->nDimension-1]; src_depth = 0; src_height = Tsrc->size[Tsrc->nDimension-2]; src_width = Tsrc->size[Tsrc->nDimension-1]; if(Tsrc->nDimension == 3) { src_stride0 = Tsrc->stride[0]; src_depth = Tsrc->size[0]; } if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) { luaL_error(L, "image.polar: src and dst depths do not match"); } if( (Tsrc->nDimension!=Tdst->nDimension) ) { luaL_error(L, "image.polar: src and dst depths do not match"); } // compute maximum distance midY = 
(float) src_height / 2.0;
midX = (float) src_width / 2.0;
if(doFull == 1) {
  // full mode: radius reaches the image corners (half the diagonal)
  m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
} else {
  // otherwise radius is limited to the smaller half-dimension
  m = (src_width < src_height) ? midX : midY;
}
// loop to fill polar image (nearest-neighbour sampling)
for(j = 0; j < dst_height; j++) {               // orientation loop
  jd = (float) j;
  a = (2 * M_PI * jd) / (float) dst_height;     // current angle
  for(i = 0; i < dst_width; i++) {              // radius loop
    float val = -1;                             // -1 == "not yet resolved"; 0 == out of bounds
    id = (float) i;
    r = (m * id) / (float) dst_width;           // current distance
    jj = (long) floor( r * cos(a) + midY);      // y-location in source image
    ii = (long) floor(-r * sin(a) + midX);      // x-location in source image
    if(ii>src_width-1) val=0;
    if(jj>src_height-1) val=0;
    if(ii<0) val=0;
    if(jj<0) val=0;
    if(Tsrc->nDimension==2) {
      if(val==-1) val=src[ii*src_stride2+jj*src_stride1];
      dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
    } else {
      int do_copy=0;
      if(do_copy) ; // (placeholder removed) -- see below
      if(val==-1) do_copy=1;
      for(k=0;k<src_depth;k++) {
        if(do_copy) val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
        dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
      }
    }
  }
}
return 0;
}

/*
 * Cartesian-to-polar resampling with bilinear interpolation.
 * Lua args: (1) src tensor, (2) dst tensor, (3) doFull flag.
 * Falls back to nearest-neighbour at the image border where a
 * full 2x2 bilinear neighbourhood is unavailable.
 */
static int image_(Main_polarBilinear)(lua_State *L) {
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float doFull = luaL_checknumber(L, 3);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float id, jd, a, r, m, midY, midX;
  long ii_0, ii_1, jj_0, jj_1;
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  // dst geometry: stride0/depth stay 0 for the 2D case
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  // src geometry, same convention
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
    luaL_error(L, "image.polar: src and dst depths do not match");
  }
  if( (Tsrc->nDimension!=Tdst->nDimension) ) {
    luaL_error(L, "image.polar: src and dst depths do not match");
  }
  // compute maximum distance
  midY = (float) src_height / 2.0;
  midX = (float) src_width / 2.0;
  if(doFull == 1) {
    m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
  } else {
    m = (src_width < src_height) ? midX : midY;
  }
  // loop to fill polar image
  for(j = 0; j < dst_height; j++) {             // orientation loop
    jd = (float) j;
    a = (2 * M_PI * jd) / (float) dst_height;   // current angle
    for(i = 0; i < dst_width; i++) {            // radius loop
      float val = -1;                           // -1 == "not yet resolved"
      temp_t ri, rj, wi, wj;
      id = (float) i;
      r = (m * id) / (float) dst_width;         // current distance
      rj = r * cos(a) + midY;                   // y-location in source image
      ri = -r * sin(a) + midX;                  // x-location in source image
      ii_0=(long)floor(ri);
      ii_1=ii_0 + 1;
      jj_0=(long)floor(rj);
      jj_1=jj_0 + 1;
      wi = ri - ii_0;                           // fractional weights
      wj = rj - jj_0;
      // switch to nearest interpolation when bilinear is impossible
      if(ii_1>src_width-1 || jj_1>src_height-1 || ii_0<0 || jj_0<0) {
        if(ii_0>src_width-1) val=0;
        if(jj_0>src_height-1) val=0;
        if(ii_0<0) val=0;
        if(jj_0<0) val=0;
        if(Tsrc->nDimension==2) {
          if(val==-1) val=src[ii_0*src_stride2+jj_0*src_stride1];
          dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
        } else {
          int do_copy=0;
          if(val==-1) do_copy=1;
          for(k=0;k<src_depth;k++) {
            if(do_copy) val=src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0];
            dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
          }
        }
      }
      // bilinear interpolation
      else {
        if(Tsrc->nDimension==2) {
          if(val==-1)
            val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1]
                + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1]
                + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1]
                + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1];
          dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
        } else {
          int do_copy=0;
          if(val==-1) do_copy=1;
          for(k=0;k<src_depth;k++) {
            if(do_copy) {
              val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0]
                  + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0]
                  + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0]
                  + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0];
            }
            dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
          }
        }
      }
    }
  }
  return 0;
}

/*
 * Log-polar resampling with nearest-neighbour sampling.
 * Lua args: (1) src tensor, (2) dst tensor, (3) doFull flag.
 */
static int image_(Main_logPolar)(lua_State *L) {
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float doFull = luaL_checknumber(L, 3);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float id, jd, a, r, m, midY, midX, fw;
  long ii,jj;
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth =
Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
    luaL_error(L, "image.polar: src and dst depths do not match");
  }
  if( (Tsrc->nDimension!=Tdst->nDimension) ) {
    luaL_error(L, "image.polar: src and dst depths do not match");
  }
  // compute maximum distance
  midY = (float) src_height / 2.0;
  midX = (float) src_width / 2.0;
  if(doFull == 1) {
    m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
  } else {
    m = (src_width < src_height) ? midX : midY;
  }
  // loop to fill polar image
  // fw maps destination column i to radius exp(i*fw), so column dst_width-1
  // lands near the maximum radius m
  fw = log(m) / (float) dst_width;
  for(j = 0; j < dst_height; j++) {             // orientation loop
    jd = (float) j;
    a = (2 * M_PI * jd) / (float) dst_height;   // current angle
    for(i = 0; i < dst_width; i++) {            // radius loop
      float val = -1;                           // -1 == "not yet resolved"; 0 == out of bounds
      id = (float) i;
      r = exp(id * fw);                         // logarithmic radius
      jj = (long) floor( r * cos(a) + midY);    // y-location in source image
      ii = (long) floor(-r * sin(a) + midX);    // x-location in source image
      if(ii>src_width-1) val=0;
      if(jj>src_height-1) val=0;
      if(ii<0) val=0;
      if(jj<0) val=0;
      if(Tsrc->nDimension==2) {
        if(val==-1) val=src[ii*src_stride2+jj*src_stride1];
        dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
      } else {
        int do_copy=0;
        if(val==-1) do_copy=1;
        for(k=0;k<src_depth;k++) {
          if(do_copy) val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
          dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
        }
      }
    }
  }
  return 0;
}

/*
 * Log-polar resampling with bilinear interpolation.
 * Lua args: (1) src tensor, (2) dst tensor, (3) doFull flag.
 * Falls back to nearest-neighbour at the border where a full
 * 2x2 bilinear neighbourhood is unavailable.
 */
static int image_(Main_logPolarBilinear)(lua_State *L) {
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float doFull = luaL_checknumber(L, 3);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float id, jd, a, r, m, midY, midX, fw;
  long ii_0, ii_1, jj_0, jj_1;
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  // dst geometry (stride0/depth remain 0 in the 2D case)
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  // src geometry
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
    luaL_error(L, "image.polar: src and dst depths do not match");
  }
  if( (Tsrc->nDimension!=Tdst->nDimension) ) {
    luaL_error(L, "image.polar: src and dst depths do not match");
  }
  // compute maximum distance
  midY = (float) src_height / 2.0;
  midX = (float) src_width / 2.0;
  if(doFull == 1) {
    m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
  } else {
    m = (src_width < src_height) ? midX : midY;
  }
  // loop to fill polar image
  fw = log(m) / (float) dst_width;
  for(j = 0; j < dst_height; j++) {             // orientation loop
    jd = (float) j;
    a = (2 * M_PI * jd) / (float) dst_height;   // current angle
    for(i = 0; i < dst_width; i++) {            // radius loop
      float val = -1;
      // NOTE(review): declared float here, but temp_t in Main_polarBilinear --
      // presumably equivalent in practice; confirm temp_t's definition.
      float ri, rj, wi, wj;
      id = (float) i;
      r = exp(id * fw);                         // logarithmic radius
      rj = r * cos(a) + midY;                   // y-location in source image
      ri = -r * sin(a) + midX;                  // x-location in source image
      ii_0=(long)floor(ri);
      ii_1=ii_0 + 1;
      jj_0=(long)floor(rj);
      jj_1=jj_0 + 1;
      wi = ri - ii_0;                           // fractional weights
      wj = rj - jj_0;
      // switch to nearest interpolation when bilinear is impossible
      if(ii_1>src_width-1 || jj_1>src_height-1 || ii_0<0 || jj_0<0) {
        if(ii_0>src_width-1) val=0;
        if(jj_0>src_height-1) val=0;
        if(ii_0<0) val=0;
        if(jj_0<0) val=0;
        if(Tsrc->nDimension==2) {
          if(val==-1) val=src[ii_0*src_stride2+jj_0*src_stride1];
          dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
        } else {
          int do_copy=0;
          if(val==-1) do_copy=1;
          for(k=0;k<src_depth;k++) {
            if(do_copy) val=src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0];
            dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
          }
        }
      }
      // bilinear interpolation
      else {
        if(Tsrc->nDimension==2) {
          if(val==-1)
            val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1]
                + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1]
                + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1]
                + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1];
          dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
        } else {
          int do_copy=0;
          if(val==-1) do_copy=1;
          for(k=0;k<src_depth;k++) {
            if(do_copy) {
              val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0]
                  + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0]
                  + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0]
                  + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0];
            }
            dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
          }
        }
      }
    }
  }
  return
0;
}

/*
 * Copies a rectangular window of src into dst without any rescaling.
 * Lua args: (1) src tensor, (2) dst tensor, (3) startx, (4) starty.
 * The crop size is taken from dst's dimensions; the whole window must
 * lie inside src.
 */
static int image_(Main_cropNoScale)(lua_State *L) {
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  long startx = luaL_checklong(L, 3);
  long starty = luaL_checklong(L, 4);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  // dst geometry (stride0/depth stay 0 in the 2D case)
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  // src geometry
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  // the requested window must be fully inside src
  if( startx<0 || starty<0 || (startx+dst_width>src_width) || (starty+dst_height>src_height))
    luaL_error(L, "image.crop: crop goes outside bounds of src");
  if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
    luaL_error(L, "image.crop: src and dst depths do not match");
  for(j = 0; j < dst_height; j++) {
    for(i = 0; i < dst_width; i++) {
      float val = 0.0;
      long ii=i+startx;
      long jj=j+starty;
      if(Tsrc->nDimension==2) {
        val=src[ii*src_stride2+jj*src_stride1];
        dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
      } else {
        for(k=0;k<src_depth;k++) {
          val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
          dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
        }
      }
    }
  }
  return 0;
}

/*
 * Shifts src by (shiftx, shifty) into dst; pixels that fall outside
 * dst are dropped. Lua args: (1) src, (2) dst, (3) shiftx, (4) shifty.
 * Note: stride0/depth default to 1 here (not 0 as in the functions
 * above) so the single inner k-loop also covers the 2D case.
 */
static int image_(Main_translate)(lua_State *L) {
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  long shiftx = luaL_checklong(L, 3);
  long shifty = luaL_checklong(L, 4);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  dst_stride0 = 1;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 1;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 1;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 1;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
    luaL_error(L, "image.translate: src and dst depths do not match");
  for(j = 0; j < src_height; j++) {
    for(i = 0; i < src_width; i++) {
      long ii=i+shiftx;
      long jj=j+shifty;
      // Check it's within destination bounds, else crop
      if(ii<dst_width && jj<dst_height && ii>=0 && jj>=0) {
        for(k=0;k<src_depth;k++) {
          dst[ii*dst_stride2+jj*dst_stride1+k*dst_stride0] = src[i*src_stride2+j*src_stride1+k*src_stride0];
        }
      }
    }
  }
  return 0;
}

/*
 * Clamps every element of the input tensor (arg 1) to [0, 1] in place
 * and returns it. For ByteTensor this is a no-op.
 */
static int image_(Main_saturate)(lua_State *L) {
#ifdef TH_REAL_IS_BYTE
  // Noop since necessarily constrained to [0, 255].
#else
  THTensor *input = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *output = input;
  // clamp each element to [0, 1] in place
  TH_TENSOR_APPLY2(real, output, real, input, \
                   *output_data = (*input_data < 0) ? 0 : (*input_data > 1) ? 1 : *input_data;)
#endif
  return 1;
}

/*
 * Converts an RGB color value to HSL. Conversion formula
 * adapted from http://en.wikipedia.org/wiki/HSL_color_space.
 * Assumes r, g, and b are contained in the set [0, 1] and
 * returns h, s, and l in the set [0, 1].
 */
int image_(Main_rgb2hsl)(lua_State *L) {
  THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *hsl = luaT_checkudata(L, 2, torch_Tensor);
  int y,x;
  temp_t r, g, b, h, s, l;
  for (y=0; y<rgb->size[1]; y++) {
    for (x=0; x<rgb->size[2]; x++) {
      // get Rgb
      r = THTensor_(get3d)(rgb, 0, y, x);
      g = THTensor_(get3d)(rgb, 1, y, x);
      b = THTensor_(get3d)(rgb, 2, y, x);
#ifdef TH_REAL_IS_BYTE
      // bytes arrive in [0, 255]; normalize to [0, 1] for the math
      r /= 255;
      g /= 255;
      b /= 255;
#endif
      temp_t mx = max(max(r, g), b);
      temp_t mn = min(min(r, g), b);
      if(mx == mn) {
        h = 0; // achromatic
        s = 0;
        l = mx;
      } else {
        temp_t d = mx - mn;
        if (mx == r) {
          h = (g - b) / d + (g < b ? 6 : 0);
        } else if (mx == g) {
          h = (b - r) / d + 2;
        } else {
          h = (r - g) / d + 4;
        }
        h /= 6;
        l = (mx + mn) / 2;
        s = l > 0.5 ? d / (2 - mx - mn) : d / (mx + mn);
      }
      // set hsl
#ifdef TH_REAL_IS_BYTE
      h *= 255;
      s *= 255;
      l *= 255;
#endif
      THTensor_(set3d)(hsl, 0, y, x, image_(FromIntermediate)(h));
      THTensor_(set3d)(hsl, 1, y, x, image_(FromIntermediate)(s));
      THTensor_(set3d)(hsl, 2, y, x, image_(FromIntermediate)(l));
    }
  }
  return 0;
}

// helper: hue-to-RGB channel for HSL -> RGB conversion
static inline temp_t image_(hue2rgb)(temp_t p, temp_t q, temp_t t) {
  if (t < 0.) t += 1;
  if (t > 1.) t -= 1;
  if (t < 1./6)
    return p + (q - p) * 6. * t;
  else if (t < 1./2)
    return q;
  else if (t < 2./3)
    return p + (q - p) * (2./3 - t) * 6.;
  else
    return p;
}

/*
 * Converts an HSL color value to RGB. Conversion formula
 * adapted from http://en.wikipedia.org/wiki/HSL_color_space.
 * Assumes h, s, and l are contained in the set [0, 1] and
 * returns r, g, and b in the set [0, 1].
 */
int image_(Main_hsl2rgb)(lua_State *L) {
  THTensor *hsl = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
  int y,x;
  temp_t r, g, b, h, s, l;
  for (y=0; y<hsl->size[1]; y++) {
    for (x=0; x<hsl->size[2]; x++) {
      // get hsl
      h = THTensor_(get3d)(hsl, 0, y, x);
      s = THTensor_(get3d)(hsl, 1, y, x);
      l = THTensor_(get3d)(hsl, 2, y, x);
#ifdef TH_REAL_IS_BYTE
      h /= 255;
      s /= 255;
      l /= 255;
#endif
      if(s == 0) {
        // achromatic
        r = l;
        g = l;
        b = l;
      } else {
        temp_t q = (l < 0.5) ? (l * (1 + s)) : (l + s - l * s);
        temp_t p = 2 * l - q;
        temp_t hr = h + 1./3;
        temp_t hg = h;
        temp_t hb = h - 1./3;
        r = image_(hue2rgb)(p, q, hr);
        g = image_(hue2rgb)(p, q, hg);
        b = image_(hue2rgb)(p, q, hb);
      }
      // set rgb
#ifdef TH_REAL_IS_BYTE
      r *= 255;
      g *= 255;
      b *= 255;
#endif
      THTensor_(set3d)(rgb, 0, y, x, image_(FromIntermediate)(r));
      THTensor_(set3d)(rgb, 1, y, x, image_(FromIntermediate)(g));
      THTensor_(set3d)(rgb, 2, y, x, image_(FromIntermediate)(b));
    }
  }
  return 0;
}

/*
 * Converts an RGB color value to HSV. Conversion formula
 * adapted from http://en.wikipedia.org/wiki/HSV_color_space.
 * Assumes r, g, and b are contained in the set [0, 1] and
 * returns h, s, and v in the set [0, 1].
 */
int image_(Main_rgb2hsv)(lua_State *L) {
  THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *hsv = luaT_checkudata(L, 2, torch_Tensor);
  int y, x;
  temp_t r, g, b, h, s, v;
  for (y=0; y<rgb->size[1]; y++) {
    for (x=0; x<rgb->size[2]; x++) {
      // get Rgb
      r = THTensor_(get3d)(rgb, 0, y, x);
      g = THTensor_(get3d)(rgb, 1, y, x);
      b = THTensor_(get3d)(rgb, 2, y, x);
#ifdef TH_REAL_IS_BYTE
      r /= 255;
      g /= 255;
      b /= 255;
#endif
      temp_t mx = max(max(r, g), b);
      temp_t mn = min(min(r, g), b);
      if(mx == mn) {
        // achromatic
        h = 0;
        s = 0;
        v = mx;
      } else {
        temp_t d = mx - mn;
        if (mx == r) {
          h = (g - b) / d + (g < b ?
6 : 0);
        } else if (mx == g) {
          h = (b - r) / d + 2;
        } else {
          h = (r - g) / d + 4;
        }
        h /= 6;
        s = d / mx;
        v = mx;
      }
      // set hsv
#ifdef TH_REAL_IS_BYTE
      h *= 255;
      s *= 255;
      v *= 255;
#endif
      THTensor_(set3d)(hsv, 0, y, x, image_(FromIntermediate)(h));
      THTensor_(set3d)(hsv, 1, y, x, image_(FromIntermediate)(s));
      THTensor_(set3d)(hsv, 2, y, x, image_(FromIntermediate)(v));
    }
  }
  return 0;
}

/*
 * Converts an HSV color value to RGB. Conversion formula
 * adapted from http://en.wikipedia.org/wiki/HSV_color_space.
 * Assumes h, s, and l are contained in the set [0, 1] and
 * returns r, g, and b in the set [0, 1].
 */
int image_(Main_hsv2rgb)(lua_State *L) {
  THTensor *hsv = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
  int y, x;
  temp_t r, g, b, h, s, v;
  for (y=0; y<hsv->size[1]; y++) {
    for (x=0; x<hsv->size[2]; x++) {
      // get hsv
      h = THTensor_(get3d)(hsv, 0, y, x);
      s = THTensor_(get3d)(hsv, 1, y, x);
      v = THTensor_(get3d)(hsv, 2, y, x);
#ifdef TH_REAL_IS_BYTE
      h /= 255;
      s /= 255;
      v /= 255;
#endif
      // sextant index and intermediate values of the standard HSV->RGB formula
      int i = floor(h*6.);
      temp_t f = h*6-i;
      temp_t p = v*(1-s);
      temp_t q = v*(1-f*s);
      temp_t t = v*(1-(1-f)*s);
      switch (i % 6) {
        case 0: r = v, g = t, b = p; break;
        case 1: r = q, g = v, b = p; break;
        case 2: r = p, g = v, b = t; break;
        case 3: r = p, g = q, b = v; break;
        case 4: r = t, g = p, b = v; break;
        case 5: r = v, g = p, b = q; break;
        default: r=0; g = 0, b = 0; break;
      }
      // set rgb
#ifdef TH_REAL_IS_BYTE
      r *= 255;
      g *= 255;
      b *= 255;
#endif
      THTensor_(set3d)(rgb, 0, y, x, image_(FromIntermediate)(r));
      THTensor_(set3d)(rgb, 1, y, x, image_(FromIntermediate)(g));
      THTensor_(set3d)(rgb, 2, y, x, image_(FromIntermediate)(b));
    }
  }
  return 0;
}

#ifndef TH_REAL_IS_BYTE
/*
 * Convert an sRGB color channel to a linear sRGB color channel.
 */
static inline real image_(gamma_expand_sRGB)(real nonlinear) {
  return (nonlinear <= 0.04045) ? (nonlinear / 12.92) : (pow((nonlinear+0.055)/1.055, 2.4));
}

/*
 * Convert a linear sRGB color channel to a sRGB color channel.
 */
static inline real image_(gamma_compress_sRGB)(real linear) {
  return (linear <= 0.0031308) ? (12.92 * linear) : (1.055 * pow(linear, 1.0/2.4) - 0.055);
}

/*
 * Converts an sRGB color value to LAB.
 * Based on http://www.brucelindbloom.com/index.html?Equations.html.
 * Assumes r, g, and b are contained in the set [0, 1].
 * LAB output is NOT restricted to [0, 1]!
 */
int image_(Main_rgb2lab)(lua_State *L) {
  THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *lab = luaT_checkudata(L, 2, torch_Tensor);
  // CIE Standard
  double epsilon = 216.0/24389.0;
  double k = 24389.0/27.0;
  // D65 white point
  double xn = 0.950456;
  double zn = 1.088754;
  int y,x;
  real r,g,b,l,a,_b;
  for (y=0; y<rgb->size[1]; y++) {
    for (x=0; x<rgb->size[2]; x++) {
      // get RGB (undo sRGB gamma to get linear light)
      r = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 0, y, x));
      g = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 1, y, x));
      b = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 2, y, x));
      // sRGB to XYZ
      double X = 0.412453 * r + 0.357580 * g + 0.180423 * b;
      double Y = 0.212671 * r + 0.715160 * g + 0.072169 * b;
      double Z = 0.019334 * r + 0.119193 * g + 0.950227 * b;
      // normalize for D65 white point
      X /= xn;
      Z /= zn;
      // XYZ normalized to CIE Lab
      double fx = X > epsilon ? pow(X, 1/3.0) : (k * X + 16)/116;
      double fy = Y > epsilon ? pow(Y, 1/3.0) : (k * Y + 16)/116;
      double fz = Z > epsilon ? pow(Z, 1/3.0) : (k * Z + 16)/116;
      l = 116 * fy - 16;
      a = 500 * (fx - fy);
      _b = 200 * (fy - fz);
      // set lab
      THTensor_(set3d)(lab, 0, y, x, l);
      THTensor_(set3d)(lab, 1, y, x, a);
      THTensor_(set3d)(lab, 2, y, x, _b);
    }
  }
  return 0;
}

/*
 * Converts an LAB color value to sRGB.
 * Based on http://www.brucelindbloom.com/index.html?Equations.html.
 * returns r, g, and b in the set [0, 1].
 */
int image_(Main_lab2rgb)(lua_State *L) {
  THTensor *lab = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
  int y,x;
  real r,g,b,l,a,_b;
  // CIE Standard
  double epsilon = 216.0/24389.0;
  double k = 24389.0/27.0;
  // D65 white point
  double xn = 0.950456;
  double zn = 1.088754;
  for (y=0; y<lab->size[1]; y++) {
    for (x=0; x<lab->size[2]; x++) {
      // get lab
      l = THTensor_(get3d)(lab, 0, y, x);
      a = THTensor_(get3d)(lab, 1, y, x);
      _b = THTensor_(get3d)(lab, 2, y, x);
      // LAB to XYZ
      double fy = (l + 16) / 116;
      double fz = fy - _b / 200;
      double fx = (a / 500) + fy;
      double X = pow(fx, 3);
      if (X <= epsilon) X = (116 * fx - 16) / k;
      double Y = l > (k * epsilon) ? pow((l + 16) / 116, 3) : l/k;
      double Z = pow(fz, 3);
      if (Z <= epsilon) Z = (116 * fz - 16) / k;
      X *= xn;
      Z *= zn;
      // XYZ to sRGB (linear light)
      r = 3.2404542 * X - 1.5371385 * Y - 0.4985314 * Z;
      g = -0.9692660 * X + 1.8760108 * Y + 0.0415560 * Z;
      b = 0.0556434 * X - 0.2040259 * Y + 1.0572252 * Z;
      // set rgb
      // NOTE(review): parenthesization differs from the image_(name)(arg)
      // pattern used everywhere else (e.g. image_(FromIntermediate)(val));
      // verify the image_ macro expands this to the intended
      // gamma-compress call before restyling.
      THTensor_(set3d)(rgb, 0, y, x, image_(gamma_compress_sRGB(r)));
      THTensor_(set3d)(rgb, 1, y, x, image_(gamma_compress_sRGB(g)));
      THTensor_(set3d)(rgb, 2, y, x, image_(gamma_compress_sRGB(b)));
    }
  }
  return 0;
}
#else
int image_(Main_rgb2lab)(lua_State *L) {
  return luaL_error(L, "image.rgb2lab: not supported for torch.ByteTensor");
}
int image_(Main_lab2rgb)(lua_State *L) {
  return luaL_error(L, "image.lab2rgb: not supported for torch.ByteTensor");
}
#endif // TH_REAL_IS_BYTE

/* Vertically flip an image */
int image_(Main_vflip)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
  int width = dst->size[2];
  int height = dst->size[1];
  int channels = dst->size[0];
  long *is = src->stride;
  long *os = dst->stride;
  // get raw pointers
  real *dst_data = THTensor_(data)(dst);
  real *src_data = THTensor_(data)(src);
  long k, x, y;
  if (dst_data != src_data) {
    /* not in-place.
     * this branch could be removed by first duplicating the src into dst then doing inplace  */
#pragma omp parallel for private(k, x, y)
    for(k=0; k<channels; k++) {
      for (y=0; y<height; y++) {
        for (x=0; x<width; x++) {
          dst_data[ k*os[0] + (height-1-y)*os[1] + x*os[2] ] = src_data[ k*is[0] + y*is[1] + x*is[2] ];
        }
      }
    }
  } else {
    /* in-place: swap mirrored rows, only walk the top half */
    real swap, * src_px, * dst_px;
    long half_height = height >> 1;
    for(k=0; k<channels; k++) {
      for (y=0; y < half_height; y++) {
        for (x=0; x<width; x++) {
          src_px = src_data + k*is[0] + y*is[1] + x*is[2];
          dst_px = dst_data + k*is[0] + (height-1-y)*is[1] + x*is[2];
          swap = *dst_px;
          *dst_px = *src_px;
          *src_px = swap;
        }
      }
    }
  }
  return 0;
}

/* Horizontally flip an image */
int image_(Main_hflip)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
  int width = dst->size[2];
  int height = dst->size[1];
  int channels = dst->size[0];
  long *is = src->stride;
  long *os = dst->stride;
  // get raw pointers
  real *dst_data = THTensor_(data)(dst);
  real *src_data = THTensor_(data)(src);
  long k, x, y;
  if (dst_data != src_data) {
    /* not in-place.
     * this branch could be removed by first duplicating the src into dst then doing inplace  */
#pragma omp parallel for private(k, x, y)
    for(k=0; k<channels; k++) {
      for (y=0; y<height; y++) {
        for (x=0; x<width; x++) {
          dst_data[ k*os[0] + y*os[1] + (width-x-1)*os[2] ] = src_data[ k*is[0] + y*is[1] + x*is[2] ];
        }
      }
    }
  } else {
    /* in-place: swap mirrored columns, only walk the left half */
    real swap, * src_px, * dst_px;
    long half_width = width >> 1;
    for(k=0; k<channels; k++) {
      for (y=0; y < height; y++) {
        for (x=0; x<half_width; x++) {
          src_px = src_data + k*is[0] + y*is[1] + x*is[2];
          dst_px = dst_data + k*is[0] + y*is[1] + (width-x-1)*is[2];
          swap = *dst_px;
          *dst_px = *src_px;
          *src_px = swap;
        }
      }
    }
  }
  return 0;
}

/* flip an image along a specified dimension (5D tensors only, out of place) */
int image_(Main_flip)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
  long flip_dim = luaL_checklong(L, 3);  // 1-indexed from Lua
  if ((dst->nDimension != 5) || (src->nDimension != 5)) {
    luaL_error(L, "image.flip: expected 5 dimensions for src and dst");
  }
  if (flip_dim < 1 || flip_dim > dst->nDimension || flip_dim > 5) {
    luaL_error(L, "image.flip: flip_dim out of bounds");
  }
  flip_dim--;  // Make it zero indexed
  // get raw pointers
  real *dst_data = THTensor_(data)(dst);
  real *src_data = THTensor_(data)(src);
  if (dst_data == src_data) {
    luaL_error(L, "image.flip: in-place flip not supported");
  }
  long size0 = dst->size[0];
  long size1 = dst->size[1];
  long size2 = dst->size[2];
  long size3 = dst->size[3];
  long size4 = dst->size[4];
  if (src->size[0] != size0 || src->size[1] != size1 || src->size[2] != size2 || src->size[3] != size3 || src->size[4] != size4) {
    luaL_error(L, "image.flip: src and dst are not the same size");
  }
  long *is = src->stride;
  long *os = dst->stride;
  long x, y, z, d, t, isrc, idst = 0;
  for (t = 0; t < size0; t++) {
    for (d = 0; d < size1; d++) {
      for (z = 0; z < size2; z++) {
        for (y = 0; y < size3; y++) {
          for (x = 0; x < size4; x++) {
            isrc = t*is[0] + d*is[1] + z*is[2] + y*is[3] + x*is[4];
            // The big switch statement here looks ugly, however on my machine
            // gcc compiles it to a skip list, so it should be fast.
            switch (flip_dim) {
              case 0:
                idst = (size0 - t - 1)*os[0] + d*os[1] + z*os[2] + y*os[3] + x*os[4];
                break;
              case 1:
                idst = t*os[0] + (size1 - d - 1)*os[1] + z*os[2] + y*os[3] + x*os[4];
                break;
              case 2:
                idst = t*os[0] + d*os[1] + (size2 - z - 1)*os[2] + y*os[3] + x*os[4];
                break;
              case 3:
                idst = t*os[0] + d*os[1] + z*os[2] + (size3 - y - 1)*os[3] + x*os[4];
                break;
              case 4:
                idst = t*os[0] + d*os[1] + z*os[2] + y*os[3] + (size4 - x - 1)*os[4];
                break;
            }
            dst_data[ idst ] = src_data[ isrc ];
          }
        }
      }
    }
  }
  return 0;
}

/*
 * Bicubic sample of src at fractional location (ix, iy), written into dst
 * for every channel. When bounds_check is set, out-of-range taps read
 * pad_value instead of src.
 */
static inline void image_(Main_bicubicInterpolate)(
  real* src, long* is, long* size, temp_t ix, temp_t iy,
  real* dst, long *os,
  real pad_value, int bounds_check)
{
  int i, j, k;
  temp_t arr[4], p[4];
  // Calculate fractional and integer components
  long x_pix = floor(ix);
  long y_pix = floor(iy);
  temp_t dx = ix - x_pix;
  temp_t dy = iy - y_pix;
  for (k=0; k<size[0]; k++) {
#pragma unroll
    for (i = 0; i < 4; i++) {
      long v = y_pix + i - 1;
      real* data = &src[k * is[0] + v * is[1]];
#pragma unroll
      for (j = 0; j < 4; j++) {
        long u = x_pix + j - 1;
        if (bounds_check && (v < 0 || v >= size[1] || u < 0 || u >= size[2])) {
          p[j] = pad_value;
        } else {
          p[j] = data[u * is[2]];
        }
      }
      // interpolate along x, then along y below
      arr[i] = image_(Main_cubicInterpolate)(p[0], p[1], p[2], p[3], dx);
    }
    temp_t value = image_(Main_cubicInterpolate)(arr[0], arr[1], arr[2], arr[3], dy);
    dst[k * os[0]] = image_(FromIntermediate)(value);
  }
}

/*
 * Warps an image, according to an (x,y) flow field. The flow
 * field is in the space of the destination image, each vector
 * points to a source pixel in the original image.
 */
int image_(Main_warp)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *flowfield = luaT_checkudata(L, 3, torch_Tensor);
  int mode = lua_tointeger(L, 4);          // 0=nearest, 1=bilinear, 2=bicubic, 3=lanczos
  int offset_mode = lua_toboolean(L, 5);   // true: flow is relative to (x,y); false: absolute
  int clamp_mode = lua_tointeger(L, 6);    // 1: pad off-image samples with pad_value
  real pad_value = (real)lua_tonumber(L, 7);
  // dims
  int width = dst->size[2];
  int height = dst->size[1];
  int src_width = src->size[2];
  int src_height = src->size[1];
  int channels = dst->size[0];
  long *is = src->stride;
  long *os = dst->stride;
  long *fs = flowfield->stride;
  // get raw pointers
  real *dst_data = THTensor_(data)(dst);
  real *src_data = THTensor_(data)(src);
  real *flow_data = THTensor_(data)(flowfield);
  // resample
  long k,x,y,v,u,i,j;
#pragma omp parallel for private(k, x, y, v, u, i, j)
  for (y=0; y<height; y++) {
    for (x=0; x<width; x++) {
      // subpixel position:
      float flow_y = flow_data[ 0*fs[0] + y*fs[1] + x*fs[2] ];
      float flow_x = flow_data[ 1*fs[0] + y*fs[1] + x*fs[2] ];
      float iy = offset_mode*y + flow_y;
      float ix = offset_mode*x + flow_x;
      // borders
      int off_image = 0;
      if (iy < 0 || iy > src_height - 1 || ix < 0 || ix > src_width - 1) {
        off_image = 1;
      }
      if (off_image == 1 && clamp_mode == 1) {
        // We're off the image and we're clamping the input image to 0
        for (k=0; k<channels; k++) {
          dst_data[ k*os[0] + y*os[1] + x*os[2] ] = pad_value;
        }
      } else {
        // clamp the sample position into the source image
        ix = MAX(ix,0); ix = MIN(ix,src_width-1);
        iy = MAX(iy,0); iy = MIN(iy,src_height-1);
        // bilinear?
        switch (mode) {
          case 1:  // Bilinear interpolation
          {
            // 4 nearest neighbors:
            long ix_nw = floor(ix);
            long iy_nw = floor(iy);
            long ix_ne = ix_nw + 1;
            long iy_ne = iy_nw;
            long ix_sw = ix_nw;
            long iy_sw = iy_nw + 1;
            long ix_se = ix_nw + 1;
            long iy_se = iy_nw + 1;
            // get surfaces to each neighbor:
            temp_t nw = (ix_se-ix)*(iy_se-iy);
            temp_t ne = (ix-ix_sw)*(iy_sw-iy);
            temp_t sw = (ix_ne-ix)*(iy-iy_ne);
            temp_t se = (ix-ix_nw)*(iy-iy_nw);
            // weighted sum of neighbors:
            // (MIN clamps keep the +1 neighbors inside the image at the border)
            for (k=0; k<channels; k++) {
              dst_data[ k*os[0] + y*os[1] + x*os[2] ] = image_(FromIntermediate)(
                  src_data[ k*is[0] + iy_nw*is[1] + ix_nw*is[2] ] * nw
                + src_data[ k*is[0] + iy_ne*is[1] + MIN(ix_ne,src_width-1)*is[2] ] * ne
                + src_data[ k*is[0] + MIN(iy_sw,src_height-1)*is[1] + ix_sw*is[2] ] * sw
                + src_data[ k*is[0] + MIN(iy_se,src_height-1)*is[1] + MIN(ix_se,src_width-1)*is[2] ] * se);
            }
          }
          break;
          case 0:  // Simple (i.e., nearest neighbor)
          {
            // 1 nearest neighbor:
            long ix_n = floor(ix+0.5);
            long iy_n = floor(iy+0.5);
            // weighted sum of neighbors:
            for (k=0; k<channels; k++) {
              dst_data[ k*os[0] + y*os[1] + x*os[2] ] = src_data[ k*is[0] + iy_n*is[1] + ix_n*is[2] ];
            }
          }
          break;
          case 2:  // Bicubic
          {
            // We only need to do bounds checking if ix or iy are near the edge
            int edge = !(iy >= 1 && iy < src_height - 2 && ix >= 1 && ix < src_width - 2);
            real* dst = dst_data + y*os[1] + x*os[2];
            if (edge) {
              image_(Main_bicubicInterpolate)(src_data, is, src->size, ix, iy, dst, os, pad_value, 1);
            } else {
              image_(Main_bicubicInterpolate)(src_data, is, src->size, ix, iy, dst, os, pad_value, 0);
            }
          }
          break;
          case 3:  // Lanczos
          {
            // Note: Lanczos can be made fast if the resampling period is
            // constant... and therefore the Lu, Lv can be cached and reused.
            // However, unfortunately warp makes no assumptions about resampling
            // and so we need to perform the O(k^2) convolution on each pixel AND
            // we have to re-calculate the kernel for every pixel.
            // See wikipedia for more info.
            // It is however an extremely good approximation to to full sinc
            // interpolation (IIR) filter.
            // Another note is that the version here has been optimized using
            // pretty aggressive code flow and explicit inlining. It might not
            // be very readable (contact me, Jonathan Tompson, if it is not)
            // Calculate fractional and integer components
            long x_pix = floor(ix);
            long y_pix = floor(iy);
            // Precalculate the L(x) function evaluations in the u and v direction
#define rad (3)  // This is a tunable parameter: 2 to 3 is OK
            float Lu[2 * rad];  // L(x) for u direction
            float Lv[2 * rad];  // L(x) for v direction
            for (u=x_pix-rad+1, i=0; u<=x_pix+rad; u++, i++) {
              float du = ix - (float)u;  // Lanczos kernel x value
              du = du < 0 ? -du : du;  // prefer not to used std absf
              if (du < 0.000001f) {  // TODO: Is there a real eps standard?
                Lu[i] = 1;
              } else if (du > (float)rad) {
                Lu[i] = 0;
              } else {
                Lu[i] = ((float)rad * sin((float)M_PI * du) *
                  sin((float)M_PI * du / (float)rad)) /
                  ((float)(M_PI * M_PI) * du * du);
              }
            }
            for (v=y_pix-rad+1, i=0; v<=y_pix+rad; v++, i++) {
              float dv = iy - (float)v;  // Lanczos kernel x value
              dv = dv < 0 ? -dv : dv;  // prefer not to used std absf
              if (dv < 0.000001f) {  // TODO: Is there a real eps standard?
                Lv[i] = 1;
              } else if (dv > (float)rad) {
                Lv[i] = 0;
              } else {
                Lv[i] = ((float)rad * sin((float)M_PI * dv) *
                  sin((float)M_PI * dv / (float)rad)) /
                  ((float)(M_PI * M_PI) * dv * dv);
              }
            }
            float sum_weights = 0;
            for (u=0; u<2*rad; u++) {
              for (v=0; v<2*rad; v++) {
                sum_weights += (Lu[u] * Lv[v]);
              }
            }
            for (k=0; k<channels; k++) {
              temp_t result = 0;
              for (u=x_pix-rad+1, i=0; u<=x_pix+rad; u++, i++) {
                long curu = MAX(MIN((long)(src_width-1), u), 0);
                for (v=y_pix-rad+1, j=0; v<=y_pix+rad; v++, j++) {
                  long curv = MAX(MIN((long)(src_height-1), v), 0);
                  temp_t Suv = src_data[k * is[0] + curv * is[1] + curu * is[2]];
                  temp_t weight = Lu[i] * Lv[j];
                  result += (Suv * weight);
                }
              }
              // Normalize by the sum of the weights
              result = result / (float)sum_weights;
              // Again, I assume that since the image is stored as reals we
              // don't have to worry about clamping to min and max int (to
              // prevent over or underflow)
              dst_data[ k*os[0] + y*os[1] + x*os[2] ] = image_(FromIntermediate)(result);
            }
          }
          break;
        }  // end switch (mode)
      }  // end else
    }
  }
  // done
  return 0;
}

/*
 * Fills dst with a 2D Gaussian. Lua args: (1) dst tensor, (2) amplitude,
 * (3) normalize flag, (4) sigma_u, (5) sigma_v, (6) mean_u, (7) mean_v
 * (means are fractions of width/height).
 */
int image_(Main_gaussian)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  long width = dst->size[1];
  long height = dst->size[0];
  long *os = dst->stride;
  real *dst_data = THTensor_(data)(dst);
  temp_t amplitude = (temp_t)lua_tonumber(L, 2);
  int normalize = (int)lua_toboolean(L, 3);
  temp_t sigma_u = (temp_t)lua_tonumber(L, 4);
  temp_t sigma_v = (temp_t)lua_tonumber(L, 5);
  temp_t mean_u = (temp_t)lua_tonumber(L, 6) * width + 0.5;
  temp_t mean_v = (temp_t)lua_tonumber(L, 7) * height + 0.5;
  // Precalculate 1/(sigma*size) for speed (for some stupid reason the pragma
  // omp declaration prevents gcc from optimizing the inside loop on my machine:
  // verified by checking the assembly output)
  temp_t over_sigmau = 1.0 / (sigma_u * width);
  temp_t over_sigmav = 1.0 / (sigma_v * height);
  long v, u;
  temp_t du, dv;
#pragma omp parallel for private(v, u, du, dv)
  for (v = 0; v < height; v++) {
    for (u = 0; u < width; u++) {
      du = (u + 1 -
mean_u) * over_sigmau; dv = (v + 1 - mean_v) * over_sigmav; temp_t value = amplitude * exp(-0.5 * (du*du + dv*dv)); dst_data[ v*os[0] + u*os[1] ] = image_(FromIntermediate)(value); } } if (normalize) { temp_t sum = 0; // We could parallelize this, but it's more trouble than it's worth for(v = 0; v < height; v++) { for(u = 0; u < width; u++) { sum += dst_data[ v*os[0] + u*os[1] ]; } } temp_t one_over_sum = 1.0 / sum; #pragma omp parallel for private(v, u) for(v = 0; v < height; v++) { for(u = 0; u < width; u++) { dst_data[ v*os[0] + u*os[1] ] *= one_over_sum; } } } return 0; } /* * Borrowed from github.com/clementfarabet/lua---imgraph * with Clément's permission for implementing y2jet() */ int image_(Main_colorize)(lua_State *L) { // get args THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor); THTensor *input = (THTensor *)luaT_checkudata(L, 2, torch_Tensor); THTensor *colormap = (THTensor *)luaT_checkudata(L, 3, torch_Tensor); // dims long height = input->size[0]; long width = input->size[1]; // generate color map if not given int noColorMap = THTensor_(nElement)(colormap) == 0; if (noColorMap) { THTensor_(resize2d)(colormap, width*height, 3); THTensor_(fill)(colormap, -1); } // colormap channels int channels = colormap->size[1]; // generate output THTensor_(resize3d)(output, channels, height, width); int x,y,k; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { int id = THTensor_(get2d)(input, y, x); if (noColorMap) { for (k = 0; k < channels; k++) { temp_t value = (float)rand() / (float)RAND_MAX; #ifdef TH_REAL_IS_BYTE value *= 255; #endif THTensor_(set2d)(colormap, id, k, image_(FromIntermediate)(value)); } } for (k = 0; k < channels; k++) { real color = THTensor_(get2d)(colormap, id, k); THTensor_(set3d)(output, k, y, x, color); } } } // return nothing return 0; } int image_(Main_rgb2y)(lua_State *L) { THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor); THTensor *yim = luaT_checkudata(L, 2, torch_Tensor); luaL_argcheck(L, 
rgb->nDimension == 3, 1, "image.rgb2y: src not 3D"); luaL_argcheck(L, yim->nDimension == 2, 2, "image.rgb2y: dst not 2D"); luaL_argcheck(L, rgb->size[1] == yim->size[0], 2, "image.rgb2y: src and dst not of same height"); luaL_argcheck(L, rgb->size[2] == yim->size[1], 2, "image.rgb2y: src and dst not of same width"); int y, x; temp_t r, g, b, yc; const int height = rgb->size[1]; const int width = rgb->size[2]; for (y=0; y<height; y++) { for (x=0; x<width; x++) { // get Rgb r = THTensor_(get3d)(rgb, 0, y, x); g = THTensor_(get3d)(rgb, 1, y, x); b = THTensor_(get3d)(rgb, 2, y, x); yc = 0.299 * r + 0.587 * g + 0.114 * b; THTensor_(set2d)(yim, y, x, image_(FromIntermediate)(yc)); } } return 0; } static inline void image_(drawPixel)(THTensor *output, int y, int x, int cr, int cg, int cb) { #ifdef TH_REAL_IS_BYTE THTensor_(set3d)(output, 0, y, x, cr); THTensor_(set3d)(output, 1, y, x, cg); THTensor_(set3d)(output, 2, y, x, cb); #else THTensor_(set3d)(output, 0, y, x, cr / 255.0f); THTensor_(set3d)(output, 1, y, x, cg / 255.0f); THTensor_(set3d)(output, 2, y, x, cb / 255.0f); #endif } static inline void image_(drawChar)(THTensor *output, int x, int y, unsigned char c, int size, int cr, int cg, int cb, int bg_cr, int bg_cg, int bg_cb) { long channels = output->size[0]; long height = output->size[1]; long width = output->size[2]; /* out of bounds condition, return without drawing */ if((x >= width) || // Clip right (y >= height) || // Clip bottom ((x + 6 * size - 1) < 0) || // Clip left ((y + 8 * size - 1) < 0)) // Clip top return; for(char i = 0; i < 6; i++ ) { unsigned char line; if (i < 5) { line = *(const unsigned char *)(image_ada_font+(c*5) + i); } else { line = 0x0; } for(char j = 0; j < 8; j++, line >>= 1) { if(line & 0x1) { if (size == 1) { image_(drawPixel)(output, y+j, x+i, cr, cg, cb); } else { for (int ii = x+(i*size); ii < x+(i*size) + size; ii++) { for (int jj = y+(j*size); jj < y+(j*size) + size; jj++) { image_(drawPixel)(output, jj, ii, cr, cg, cb); } } } } 
else if (bg_cr != -1 && bg_cg != -1 && bg_cb != -1) { if (size == 1) { image_(drawPixel)(output, y+j, x+i, bg_cr, bg_cg, bg_cb); } else { for (int ii = x+(i*size); ii < x+(i*size) + size; ii++) { for (int jj = y+(j*size); jj < y+(j*size) + size; jj++) { image_(drawPixel)(output, jj, ii, bg_cr, bg_cg, bg_cb); } } } } } } } #ifndef luaL_checkint #define luaL_checkint(l,arg) (int)luaL_checkinteger(l,arg) #endif int image_(Main_drawtext)(lua_State *L) { // get args THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor); const char* text = lua_tostring(L, 2); long x = luaL_checklong(L, 3); long y = luaL_checklong(L, 4); int size = luaL_checkint(L, 5); int cr = luaL_checkint(L, 6); int cg = luaL_checkint(L, 7); int cb = luaL_checkint(L, 8); int bg_cr = luaL_checkint(L, 9); int bg_cg = luaL_checkint(L, 10); int bg_cb = luaL_checkint(L, 11); int wrap = luaL_checkint(L, 12); long len = strlen(text); // dims long channels = output->size[0]; long height = output->size[1]; long width = output->size[2]; long cursor_y = y; long cursor_x = x; for (long cnt = 0; cnt < len; cnt++) { unsigned char c = text[cnt]; if(c == '\n') { cursor_y += size*8; cursor_x = x; } else if(c == '\r') { // skip em } else { if(wrap && ((cursor_x + size * 6) >= width)) { // Heading off edge? 
cursor_x = 0; // Reset x to zero cursor_y += size * 8; // Advance y one line } image_(drawChar)(output, cursor_x, cursor_y, c, size, cr, cg, cb, bg_cr, bg_cg, bg_cb); cursor_x += size * 6; } } return 0; } int image_(Main_drawRect)(lua_State *L) { THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor); long x1long = luaL_checklong(L, 2); long y1long = luaL_checklong(L, 3); long x2long = luaL_checklong(L, 4); long y2long = luaL_checklong(L, 5); int lineWidth = luaL_checkint(L, 6); int cr = luaL_checkint(L, 7); int cg = luaL_checkint(L, 8); int cb = luaL_checkint(L, 9); int loffset = lineWidth / 2 + 1; int uoffset = lineWidth - loffset - 1; int x1l = (int) MAX(0, x1long - loffset); int y1l = (int) MAX(0, y1long - loffset); int x1u = (int) MIN(output->size[2], x1long + uoffset + 1); int y1u = (int) MIN(output->size[1], y1long + uoffset + 1); int x2l = (int) MAX(0, x2long - loffset); int y2l = (int) MAX(0, y2long - loffset); int x2u = (int) MIN(output->size[2], x2long + uoffset + 1); int y2u = (int) MIN(output->size[1], y2long + uoffset + 1); for (int y = y1l; y < y2u; y++) { for (int x = x1l; x < x1u; x++) { image_(drawPixel)(output, y, x, cr, cg, cb); } for (int x = x2l; x < x2u; x++) { image_(drawPixel)(output, y, x, cr, cg, cb); } } for (int x = x1l; x < x2u; x++) { for (int y = y1l; y < y1u; y++) { image_(drawPixel)(output, y, x, cr, cg, cb); } for (int y = y2l; y < y2u; y++) { image_(drawPixel)(output, y, x, cr, cg, cb); } } return 0; } static const struct luaL_Reg image_(Main__) [] = { {"scaleSimple", image_(Main_scaleSimple)}, {"scaleBilinear", image_(Main_scaleBilinear)}, {"scaleBicubic", image_(Main_scaleBicubic)}, {"rotate", image_(Main_rotate)}, {"rotateBilinear", image_(Main_rotateBilinear)}, {"polar", image_(Main_polar)}, {"polarBilinear", image_(Main_polarBilinear)}, {"logPolar", image_(Main_logPolar)}, {"logPolarBilinear", image_(Main_logPolarBilinear)}, {"translate", image_(Main_translate)}, {"cropNoScale", image_(Main_cropNoScale)}, 
{"warp", image_(Main_warp)}, {"saturate", image_(Main_saturate)}, {"rgb2y", image_(Main_rgb2y)}, {"rgb2hsv", image_(Main_rgb2hsv)}, {"rgb2hsl", image_(Main_rgb2hsl)}, {"hsv2rgb", image_(Main_hsv2rgb)}, {"hsl2rgb", image_(Main_hsl2rgb)}, {"rgb2lab", image_(Main_rgb2lab)}, {"lab2rgb", image_(Main_lab2rgb)}, {"gaussian", image_(Main_gaussian)}, {"vflip", image_(Main_vflip)}, {"hflip", image_(Main_hflip)}, {"flip", image_(Main_flip)}, {"colorize", image_(Main_colorize)}, {"text", image_(Main_drawtext)}, {"drawRect", image_(Main_drawRect)}, {NULL, NULL} }; void image_(Main_init)(lua_State *L) { luaT_pushmetatable(L, torch_Tensor); luaT_registeratname(L, image_(Main__), "image"); } #endif // TH_GENERIC_FILE
nn_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ #ifndef RTABMAP_FLANN_NNINDEX_H #define RTABMAP_FLANN_NNINDEX_H #include <vector> #include "rtflann/general.h" #include "rtflann/util/matrix.h" #include "rtflann/util/params.h" #include "rtflann/util/result_set.h" #include "rtflann/util/dynamic_bitset.h" #include "rtflann/util/saving.h" namespace rtflann { #define KNN_HEAP_THRESHOLD 250 class IndexBase { public: virtual ~IndexBase() {}; virtual size_t veclen() const = 0; virtual size_t size() const = 0; virtual flann_algorithm_t getType() const = 0; virtual int usedMemory() const = 0; virtual IndexParams getParameters() const = 0; virtual void loadIndex(FILE* stream) = 0; virtual void saveIndex(FILE* stream) = 0; }; /** * Nearest-neighbour index base class */ template <typename Distance> class NNIndex : public IndexBase { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const NNIndex& other) : distance_(other.distance_), last_id_(other.last_id_), size_(other.size_), size_at_build_(other.size_at_build_), veclen_(other.veclen_), index_params_(other.index_params_), removed_(other.removed_), removed_points_(other.removed_points_), removed_count_(other.removed_count_), ids_(other.ids_), points_(other.points_), data_ptr_(NULL) { if (other.data_ptr_) { data_ptr_ = new ElementType[size_*veclen_]; std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } } virtual ~NNIndex() { if (data_ptr_) { delete[] data_ptr_; } } virtual NNIndex* clone() const = 0; /** * 
Builds the index */ virtual void buildIndex() { freeIndex(); cleanRemovedPoints(); // building index buildIndexImpl(); size_at_build_ = size_; } /** * Builds the index using the specified dataset * @param dataset the dataset to use */ virtual void buildIndex(const Matrix<ElementType>& dataset) { setDataset(dataset); this->buildIndex(); } /** * @brief Incrementally add points to the index. * @param points Matrix with points to be added * @param rebuild_threshold */ virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { throw FLANNException("Functionality not supported by this index"); } /** * Remove point from the index * @param index Index of point to be removed */ virtual void removePoint(size_t id) { if (!removed_) { ids_.resize(size_); for (size_t i=0;i<size_;++i) { ids_[i] = i; } removed_points_.resize(size_); removed_points_.reset(); last_id_ = size_; removed_ = true; } size_t point_index = id_to_index(id); if (point_index!=size_t(-1) && !removed_points_.test(point_index)) { removed_points_.set(point_index); removed_count_++; } } /** * Get point with specific id * @param id * @return */ virtual ElementType* getPoint(size_t id) { size_t index = id_to_index(id); if (index!=size_t(-1)) { return points_[index]; } else { return NULL; } } /** * @return number of features in this index. */ inline size_t size() const { return size_ - removed_count_; } inline size_t removedCount() const { return removed_count_; } inline size_t sizeAtBuild() const { return size_at_build_; } /** * @return The dimensionality of the features in this index. */ inline size_t veclen() const { return veclen_; } /** * Returns the parameters used by the index. 
* * @return The index parameters */ IndexParams getParameters() const { return index_params_; } template<typename Archive> void serialize(Archive& ar) { IndexHeader header; if (Archive::is_saving::value) { header.h.data_type = flann_datatype_value<ElementType>::value; header.h.index_type = getType(); header.h.rows = size_; header.h.cols = veclen_; } ar & header; // sanity checks if (Archive::is_loading::value) { if (strncmp(header.h.signature, FLANN_SIGNATURE_, strlen(FLANN_SIGNATURE_) - strlen("v0.0")) != 0) { throw FLANNException("Invalid index file, wrong signature"); } if (header.h.data_type != flann_datatype_value<ElementType>::value) { throw FLANNException("Datatype of saved index is different than of the one to be created."); } if (header.h.index_type != getType()) { throw FLANNException("Saved index type is different then the current index type."); } // TODO: check for distance type } ar & size_; ar & veclen_; ar & size_at_build_; bool save_dataset; if (Archive::is_saving::value) { save_dataset = get_param(index_params_,"save_dataset", false); } ar & save_dataset; if (save_dataset) { if (Archive::is_loading::value) { if (data_ptr_) { delete[] data_ptr_; } data_ptr_ = new ElementType[size_*veclen_]; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } for (size_t i=0;i<size_;++i) { ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType)); } } else { if (points_.size()!=size_) { throw FLANNException("Saved index does not contain the dataset and no dataset was provided."); } } ar & last_id_; ar & ids_; ar & removed_; if (removed_) { ar & removed_points_; } ar & removed_count_; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] 
params Search parameters */ virtual int knnSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ /*int knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { rtflann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = knnSearch(queries, indices_, dists, knn, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; }*/ /** * @brief 
Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ virtual int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int 
knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { std::vector<std::vector<size_t> > indices_; int result = knnSearch(queries, indices_, dists, knn, params); indices.resize(indices_.size()); for (size_t i=0;i<indices_.size();++i) { indices[i].assign(indices_[i].begin(), indices_[i].end()); } return result; } /** * @brief Perform radius search * @param[in] query The query point * @param[out] indices The indices of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ virtual int radiusSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; size_t num_neighbors = std::min(indices.cols, dists.cols); int max_neighbors = params.max_neighbors; if (max_neighbors<0) max_neighbors = num_neighbors; else max_neighbors = std::min(max_neighbors,(int)num_neighbors); if (max_neighbors==0) { #pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); count += resultSet.size(); } } } else { // explicitly indicated to use unbounded radius result set // and we know there'll be enough room for resulting indices and dists if (params.max_neighbors<0 && (num_neighbors>=size())) { #pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t 
n = resultSet.size(); count += n; if (n>num_neighbors) n = num_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } else { // number of neighbors limited to max_neighbors #pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if ((int)n>max_neighbors) n = max_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } } return count; } /** * * @param queries * @param indices * @param dists * @param radius * @param params * @return */ int radiusSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const { rtflann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = radiusSearch(queries, indices_, dists, radius, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; } /** * @brief Perform radius search * @param[in] query The query point * @param[out] indices The indices of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors 
found */
    virtual int radiusSearch(const Matrix<ElementType>& queries,
                             std::vector< std::vector<size_t> >& indices,
                             std::vector<std::vector<DistanceType> >& dists,
                             float radius, const SearchParams& params) const
    {
        assert(queries.cols == veclen());
        int count = 0;
        if (params.max_neighbors==0) {
            // just count neighbors: no indices/distances are copied out
#pragma omp parallel num_threads(params.cores)
            {
                CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    count += resultSet.size();
                }
            }
        }
        else {
            // make sure the output containers have one entry per query
            if (indices.size() < queries.rows ) indices.resize(queries.rows);
            if (dists.size() < queries.rows ) dists.resize(queries.rows);
            if (params.max_neighbors<0) {
                // search for all neighbors within the radius
#pragma omp parallel num_threads(params.cores)
                {
                    RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        indices[i].resize(n);
                        dists[i].resize(n);
                        if (n > 0) {
                            resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                            // translate internal point indices to user ids, in place
                            indices_to_ids(&indices[i][0], &indices[i][0], n);
                        }
                    }
                }
            }
            else {
                // number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
                {
                    KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        // 'count' reflects all neighbors found, even those
                        // trimmed below to respect max_neighbors
                        count += n;
                        if ((int)n>params.max_neighbors) n = params.max_neighbors;
                        indices[i].resize(n);
                        dists[i].resize(n);
                        if (n > 0) {
                            resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                            indices_to_ids(&indices[i][0], &indices[i][0], n);
                        }
                    }
                }
            }
        }
        return count;
    }

    /**
     * Convenience overload of radiusSearch returning int indices.
     * Delegates to the size_t overload and converts the results.
     * @param queries matrix of query points
     * @param indices per-query vectors of neighbor indices (output)
     * @param dists per-query vectors of neighbor distances (output)
     * @param radius search radius
     * @param params search parameters
     * @return total number of neighbors found
     */
    int radiusSearch(const Matrix<ElementType>& queries,
                     std::vector< std::vector<int> >& indices,
                     std::vector<std::vector<DistanceType> >& dists,
                     float radius, const SearchParams& params) const
    {
        std::vector<std::vector<size_t> > indices_;
        int result = radiusSearch(queries, indices_, dists, radius, params);
        indices.resize(indices_.size());
        for (size_t i=0;i<indices_.size();++i) {
            indices[i].assign(indices_[i].begin(), indices_[i].end());
        }
        return result;
    }

    // Core search primitive implemented by each concrete index type.
    virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;

protected:
    virtual void freeIndex() = 0;

    virtual void buildIndexImpl() = 0;

    /**
     * Map a user-visible point ID back to its internal index.
     * Fast path: when no points were removed (ids_ empty, or id stored at
     * its own slot) the ID equals the index. Otherwise a binary search
     * over the sorted ids_ array is used; returns size_t(-1) when the ID
     * is not present.
     */
    size_t id_to_index(size_t id)
    {
        if (ids_.size()==0) {
            return id;
        }
        size_t point_index = size_t(-1);
        if (id < ids_.size() && ids_[id]==id) {
            return id;
        }
        else {
            // binary search (ids_ is maintained in increasing order)
            size_t start = 0;
            size_t end = ids_.size();
            while (start<end) {
                size_t mid = (start+end)/2;
                if (ids_[mid]==id) {
                    point_index = mid;
                    break;
                }
                else if (ids_[mid]<id) {
                    start = mid + 1;
                }
                else {
                    end = mid;
                }
            }
        }
        return point_index;
    }

    /**
     * Convert internal indices to user IDs, but only when points have been
     * removed (otherwise indices and IDs coincide and nothing is done).
     * Safe to call with in == out (in-place translation).
     */
    void indices_to_ids(const size_t* in, size_t* out, size_t size) const
    {
        if (removed_) {
            for (size_t i=0;i<size;++i) {
                out[i] = ids_[in[i]];
            }
        }
    }

    /**
     * Replace the index's view of the dataset with 'dataset' and reset all
     * removal bookkeeping. Stores row pointers only; does not copy data.
     */
    void setDataset(const Matrix<ElementType>& dataset)
    {
        size_ = dataset.rows;
        veclen_ = dataset.cols;
        last_id_ = 0;
        ids_.clear();
        removed_points_.clear();
        removed_ = false;
        removed_count_ = 0;

        points_.resize(size_);
        for (size_t i=0;i<size_;++i) {
            points_[i] = dataset[i];
        }
    }

    /**
     * Append the rows of 'new_points' to the dataset. When removals have
     * occurred, each new point also gets a fresh sequential ID and a
     * cleared removed-flag.
     */
    void extendDataset(const Matrix<ElementType>& new_points)
    {
        size_t new_size = size_ + new_points.rows;
        if (removed_) {
            removed_points_.resize(new_size);
            ids_.resize(new_size);
        }
        points_.resize(new_size);
        for (size_t i=size_;i<new_size;++i) {
            points_[i] = new_points[i-size_];
            if (removed_) {
                ids_[i] = last_id_++;
                removed_points_.reset(i);
            }
        }
        size_ = new_size;
    }

    /**
     * Compact points_/ids_ by dropping entries whose removed-flag is set,
     * preserving relative order, then reset the removed counter.
     * No-op when nothing was removed.
     */
    void cleanRemovedPoints()
    {
        if (!removed_) return;

        size_t last_idx = 0;
        for (size_t i=0;i<size_;++i) {
            if (!removed_points_.test(i)) {
                points_[last_idx] = points_[i];
                ids_[last_idx] = ids_[i];
                removed_points_.reset(last_idx);
                ++last_idx;
            }
        }
        points_.resize(last_idx);
        ids_.resize(last_idx);
        removed_points_.resize(last_idx);
        size_ = last_idx;
        removed_count_ = 0;
    }

    /** Swap the complete state of this index with 'other'. */
    void swap(NNIndex& other)
    {
        std::swap(distance_, other.distance_);
        std::swap(last_id_, other.last_id_);
        std::swap(size_, other.size_);
        std::swap(size_at_build_, other.size_at_build_);
        std::swap(veclen_, other.veclen_);
        std::swap(index_params_, other.index_params_);
        std::swap(removed_, other.removed_);
        std::swap(removed_points_, other.removed_points_);
        std::swap(removed_count_, other.removed_count_);
        std::swap(ids_, other.ids_);
        std::swap(points_, other.points_);
        std::swap(data_ptr_, other.data_ptr_);
    }

protected:

    /**
     * The distance functor
     */
    Distance distance_;

    /**
     * Each index point has an associated ID. IDs are assigned sequentially in
     * increasing order. This indicates the ID assigned to the last point added to the
     * index.
     */
    size_t last_id_;

    /**
     * Number of points in the index (and database)
     */
    size_t size_;

    /**
     * Number of features in the dataset when the index was last built.
     */
    size_t size_at_build_;

    /**
     * Size of one point in the index (and database)
     */
    size_t veclen_;

    /**
     * Parameters of the index.
     */
    IndexParams index_params_;

    /**
     * Flag indicating if at least a point was removed from the index
     */
    bool removed_;

    /**
     * Array used to mark points removed from the index
     */
    DynamicBitset removed_points_;

    /**
     * Number of points removed from the index
     */
    size_t removed_count_;

    /**
     * Array of point IDs, returned by nearest-neighbour operations
     */
    std::vector<size_t> ids_;

    /**
     * Point data (row pointers; storage owned elsewhere unless data_ptr_ is set)
     */
    std::vector<ElementType*> points_;

    /**
     * Pointer to dataset memory if allocated by this index, otherwise NULL
     */
    ElementType* data_ptr_;

};

// Convenience macro for derived templates: pulls the dependent base-class
// members and helpers into scope without qualifying each use.
#define USING_BASECLASS_SYMBOLS \
    using NNIndex<Distance>::distance_;\
    using NNIndex<Distance>::size_;\
    using NNIndex<Distance>::size_at_build_;\
    using NNIndex<Distance>::veclen_;\
    using NNIndex<Distance>::index_params_;\
    using NNIndex<Distance>::removed_points_;\
    using NNIndex<Distance>::ids_;\
    using NNIndex<Distance>::removed_;\
    using NNIndex<Distance>::points_;\
    using NNIndex<Distance>::extendDataset;\
    using NNIndex<Distance>::setDataset;\
    using NNIndex<Distance>::cleanRemovedPoints;\
    using NNIndex<Distance>::indices_to_ids;

}

#endif //FLANN_NNINDEX_H
udr-2.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Verify that '#pragma omp declare reduction' rejects combiner and
   initializer expressions that reference a variable other than the
   special identifiers omp_in/omp_out (combiner) and omp_priv/omp_orig
   (initializer).  The same set of directives is checked at file scope
   (where 'v' is the global below) and inside a function (where 'v' is
   a parameter).  Each dg-error comment must remain on the same line as
   the directive it matches.  */

struct W { int w; };
void init (struct W *, int, int *);
int v;
#pragma omp declare reduction (foo : long int : omp_out |= v) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : char : omp_out = v) /* { dg-error "combiner refers to variable" } */
typedef short T;
#pragma omp declare reduction (foo : T : omp_out += v) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : int : v *= omp_in) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : struct W : omp_out.w *= omp_in.w + v) /* { dg-error "combiner refers to variable" } */

void foo (int v)
{
  #pragma omp declare reduction (foo : long int : omp_out |= v) /* { dg-error "combiner refers to variable" } */
  #pragma omp declare reduction (foo : char : omp_out = v) /* { dg-error "combiner refers to variable" } */
  #pragma omp declare reduction (foo : T : omp_out += v) /* { dg-error "combiner refers to variable" } */
  #pragma omp declare reduction (foo : int : v *= omp_in) /* { dg-error "combiner refers to variable" } */
  #pragma omp declare reduction (foo : struct W : omp_out.w *= omp_in.w + v) /* { dg-error "combiner refers to variable" } */
}

#pragma omp declare reduction (bar : long int : omp_out |= omp_in) initializer (omp_priv = v) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : char : omp_out += omp_in) initializer (omp_priv = ((char) v)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : T : omp_out += omp_in) initializer (omp_priv = (short) v) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : _Complex double : omp_out *= omp_in) initializer (omp_priv = (v)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : struct W : omp_out.w *= omp_in.w) initializer (omp_priv = { v } ) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar2 : struct W : omp_out.w *= omp_in.w) initializer (init (&omp_priv, v, (int *) 0)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar3 : struct W : omp_out.w *= omp_in.w) initializer (init (&omp_priv, 0, &v)) /* { dg-error "initializer refers to variable" } */

void bar (int v)
{
  #pragma omp declare reduction (bar : long int : omp_out |= omp_in) initializer (omp_priv = v) /* { dg-error "initializer refers to variable" } */
  #pragma omp declare reduction (bar : char : omp_out += omp_in) initializer (omp_priv = ((char) v)) /* { dg-error "initializer refers to variable" } */
  #pragma omp declare reduction (bar : T : omp_out += omp_in) initializer (omp_priv = (short) v) /* { dg-error "initializer refers to variable" } */
  #pragma omp declare reduction (bar : _Complex double : omp_out *= omp_in) initializer (omp_priv = (v)) /* { dg-error "initializer refers to variable" } */
  #pragma omp declare reduction (bar : struct W : omp_out.w *= omp_in.w) initializer (omp_priv = { v }) /* { dg-error "initializer refers to variable" } */
  #pragma omp declare reduction (bar2 : struct W : omp_out.w *= omp_in.w) initializer (init (&omp_priv, v, (int *) 0)) /* { dg-error "initializer refers to variable" } */
  #pragma omp declare reduction (bar3 : struct W : omp_out.w *= omp_in.w) initializer (init (&omp_priv, 0, &v)) /* { dg-error "initializer refers to variable" } */
}
onset.c
#include <math.h> #ifndef _OPENMP #define STRING2(x) #x #define STRING(x) STRING2(x) #pragma message (__FILE__ "(" STRING(__LINE__) "): error: This module should be compiled with /openmp on the command line") /* Generate a compiler error to stop the build as the above message doesn't stop the build when building in Matlab. */ mustLinkOpenMP #endif #if defined(_MSC_VER) // Microsoft #define EXPORT __declspec(dllexport) #define IMPORT __declspec(dllimport) #elif defined(_GCC) // GCC #define EXPORT __attribute__((visibility("default"))) #define IMPORT #else #define EXPORT #define IMPORT // #pragma warning Unknown dynamic link import/export semantics. #endif EXPORT void onset(double *envPt, int nsmp, int stw, int ltw, int gap, double *onsPt) { int index, ii; int gap2; double ssw, slw, scl; double *sswPt, *elwPt, *eswPt; gap2 = gap >> 1; ssw=0.0;slw=0.0;scl=(double) ltw/stw; index=nsmp-ltw-stw-2*gap2; onsPt=&onsPt[ltw+gap2]; sswPt=&envPt[ltw+gap2+gap2+1]; for (ii=0; ii < ltw; ii++) { slw+=envPt[ii]; } for (ii=0; ii < stw; ii++) { ssw+=sswPt[ii]; } onsPt[0] = (ssw/slw)*scl; elwPt=&envPt[ltw-1]; eswPt=&sswPt[stw-1]; for (ii=1; ii < index; ii++) { slw=slw+elwPt[ii]-envPt[ii-1]; ssw=ssw+eswPt[ii]-sswPt[ii-1]; if (slw > 0) onsPt[ii] = (ssw/slw)*scl; else onsPt[ii] = 0.0; } } EXPORT void onset_mp(double *dataPt, int ntr, int nsamp, int swin, int lwin, int gap, double *resultPt) { double *envPt, *onsPt; int tr; #pragma omp parallel for private(tr,envPt,onsPt) for (tr=0; tr<ntr; tr++) { envPt = &dataPt[tr*nsamp]; onsPt = &resultPt[tr*nsamp]; onset(envPt, nsamp, swin, lwin, gap, onsPt); } }
GB_binop__lor_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel for the LOR (logical-or) binary
// operator on uint16_t.  All real logic lives in the included *_template.c
// files; this file only defines the type/operator macros those templates
// expand.  Any functional change belongs in Generator/*, not here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__lor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__lor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__lor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lor_uint16)
// A*D function (colscale):         GB (_AxD__lor_uint16)
// D*A function (rowscale):         GB (_DxB__lor_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__lor_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__lor_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lor_uint16)
// C=scalar+B                       GB (_bind1st__lor_uint16)
// C=scalar+B'                      GB (_bind1st_tran__lor_uint16)
// C=A+scalar                       GB (_bind2nd__lor_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__lor_uint16)

// C type:   uint16_t
// A type:   uint16_t
// A pattern? 0
// B type:   uint16_t
// B pattern? 0

// BinaryOp: cij = ((aij != 0) || (bij != 0))

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) || (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOR || GxB_NO_UINT16 || GxB_NO_LOR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LOR is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion uses alpha/beta in place of entries absent from A/B
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap (GBB is true for full matrices)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) || (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__lor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its original definition for this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) || (y != 0)) ; \
}

GrB_Info GB (_bind2nd_tran__lor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_int32_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): auto-generated kernel for the LNOT (logical-not) unary
// operator applied to bool input with int32_t output.  The loops live in the
// included template files; this file only supplies the macros they expand.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int32_bool
// op(A') function:  GB_tran__lnot_int32_bool

// C type:   int32_t
// A type:   bool
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int32_bool
(
    int32_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
soap.c
// SPDX-License-Identifier: BSD-2-Clause /* Copyright 1998-1999 Bernard Parent Copyright 2020 Prasanna Thoguluva Rajendran Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "soap.h" #include "printf.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <string.h> #include <assert.h> #include <stdarg.h> #ifdef DISTMPI #include "mpi.h" #endif #define EOS 0 #define pi 3.14159265358979323846 #define sqr(a) ((a)*(a)) #define max(a,b) ((a) > (b) ? (a) : (b)) #define min(a,b) ((a) < (b) ? 
(a) : (b)) #define rad(a) (((a)*pi/180.0e0)) #define deg(a) (((a)/pi*180.0e0)) #ifndef round //#define round(a) (floor((a)+0.5e0)) #define round(a) (a<0?ceil((a)-0.5):floor((a)+0.5)) #endif //#define longfromdouble(a) ((long)(a+0.5)) #define longfromdouble(a) ((a)>=0?(long)((a)+0.5):(long)((a)-0.5)) #define DOUBLEFORMAT "%11.11E" /* the latter will give 12 significant numbers when performing calculations */ /* logical operators */ #define GT 21 #define GEQ 22 #define LT 23 #define LEQ 24 #define EQ 25 #define AND 26 #define OR 27 #define NOT 28 #define NEQ 29 #define maxnumlen 30 /* 30 chars are enough to store 15 significant numbers */ /* this function calls vfprintf with the same arguments as it is given and exits.*/ int SOAP_fatal_error(SOAP_codex_t *codex, const char *formatstr, ...){ va_list ap; char *newstr; int retval,term_height,term_width; newstr=(char *)malloc(10000*sizeof(char)); fprintf(stderr,"\n\n"); va_start(ap, formatstr); vsprintf(newstr,formatstr, ap); va_end(ap); find_terminal_window_size(&term_width,&term_height); fprintf(stderr,"%s",strwrp(newstr,min(term_width-1,70))); free(newstr); fprintf(stderr,"\n\nSOAP fatal error "); if (codex->action_being_processed!=NULL) fprintf(stderr,"within %s() ",codex->action_being_processed); fprintf(stderr,"in the vicinity of line %ld in file %s.\n\nExiting.\n\n", codex->linenum,codex->filename); exit(EXIT_FAILURE); retval=EXIT_FAILURE; return(retval); } /* cut _all characters from is to ie in *str */ void SOAP_strcut(long is, long ie, char *str){ long i; i=is; do { str[i]=str[i+(ie-is)+1]; i++; } while (str[i-1]!=EOS); } /* insert str1 into str2 at the position pos */ void SOAP_strins(char *str1, char **str2, long pos){ long len1,len2,i; len1=(long)strlen(str1); len2=(long)strlen(*str2); *str2=(char *)realloc(*str2,(len2+len1+3)*sizeof(char)); for (i=len2; i>=pos; i--) (*str2)[i+len1]=(*str2)[i]; for (i=0; i<len1; i++) (*str2)[pos+i]=str1[i]; (*str2)[len2+len1]=EOS; } void SOAP_store_file_as_string(char 
*filename, char **str){ FILE *file; long cnt; file = fopen(filename, "r"); if (file==NULL) { fprintf(stderr,"\nHaving problems opening file %s.\nExiting.\n\n",filename); exit(EXIT_FAILURE); } cnt=0; do { *str=(char *)realloc(*str,(cnt+3)*sizeof(char)); (*str)[cnt]=fgetc(file); cnt++; } while (!feof(file)); fclose(file); (*str)[cnt-1]=EOS; } /* returns TRUE on success, FALSE otherwise find string str in expr following cnts anchorL and anchorR represent the boundaries of the string*/ static bool find_str_in_str(char *expr, char *str, long cnts, long *anchorL, long *anchorR) { long len, i, c,cnt; len = (long)strlen(str); i = 0; cnt=cnts; while(i != len){ c=expr[cnt]; cnt++; *anchorR=cnt-1; *anchorL=cnt-len; if (c == EOS) return FALSE; if (str[i] == (char)c) { i++; } else { i = 0; } } return(TRUE); } static double random_double(double minval, double maxval){ long randinput; double tmp; randinput=random(); tmp=(double)(randinput)/(double)(RAND_MAX)*(maxval-minval)+minval; return(tmp); } /* returns true if expr[i] is a valid operator, false otherwise */ static bool is_operator(char *expr, long i){ bool tmp; tmp=FALSE; if (expr[i]!=EOS) { if ( expr[i]=='*' || expr[i]=='/' || expr[i]=='^' || expr[i]==GT || expr[i]==GEQ || expr[i]==LT || expr[i]==LEQ || expr[i]==EQ || expr[i]==AND || expr[i]==OR || expr[i]==NEQ) tmp=TRUE; if ( (expr[i]=='+' || expr[i]=='-') && (i>0 && ((expr[i-1]>='0' && expr[i-1]<='9') || expr[i-1]=='.')) ) tmp=TRUE; } return(tmp); } static bool is_logical(SOAP_codex_t *codex, long num){ bool tmp; tmp=(bool)num; if (num!=0 && num!=1) { SOAP_fatal_error(codex,"Expecting a is_logical expression (0 or 1) but got %ld.",num); } return(tmp); } /* replaces the NOT operator '!' 
in *expr */
/* Replace every occurrence of the unary NOT operator (project constant NOT)
   followed by a number with the logical negation of that number ("0"/"1").
   Repeats until no NOT remains. Aborts via SOAP_fatal_error() on a
   non-numeric or non-boolean operand.
   NOTE(review): assumes SOAP_strcut removes chars [anchorL..anchorR] and
   SOAP_strins inserts at the given index — confirm against their defs. */
static void replace_NOT_operator(SOAP_codex_t *codex, char **expr){
  long cnt,anchorL,anchorR;
  char *strR;
  double res;
  char *res_str;
  bool FOUND,res_bool;
  int eos=EOS;
  res_str=(char *)malloc(maxnumlen*sizeof(char));
  strR=(char *)malloc(sizeof(char));
  FOUND=FALSE;
  do {
    /* find the LAST occurrence of NOT in the expression */
    cnt=0;
    anchorL=0;
    FOUND=FALSE;
    do {
      if ( (*expr)[cnt]==NOT ) {
        anchorL=cnt;
        FOUND=TRUE;
      }
      cnt++;
    } while ((*expr)[cnt]!=EOS);
    if (FOUND) {
      /* operand runs from just after NOT to the next operator (or EOS) */
      anchorR=anchorL+1;
      do {
        anchorR++;
      } while ((!is_operator((*expr),anchorR)) && ((*expr)[anchorR]!=EOS));
      anchorR--;
      strR=(char *)realloc(strR,(anchorR-anchorL+3)*sizeof(char));
      for (cnt=anchorL+1; cnt<=anchorR; cnt++) strR[cnt-anchorL-1]=(*expr)[cnt];
      strR[anchorR-anchorL]=EOS;
      /* %n + trailing-EOS check rejects partially-numeric operands */
      if (sscanf(strR,"%lg%n",&res,&eos)!=1 || strR[eos]!=EOS) {
        SOAP_fatal_error(codex,"Cannot read expression >%s<.",strR);
      }
      res_bool=is_logical(codex,longfromdouble(res));
      /* logical negation: 0 -> "1", 1 -> "0" */
      if (res_bool==0) strcpy(res_str,"1");
      if (res_bool==1) strcpy(res_str,"0");
      SOAP_strcut(anchorL,anchorR,*expr);
      SOAP_strins(res_str,expr,anchorL);
    }
  } while (FOUND);
  free(strR);
  free(res_str);
}


/* evaluate a string expression in which there are no parentheses
   "10*50/40^10.0E7" */
/* Repeatedly finds the lowest-priority binary operator (scanned left to
   right; ties keep the LAST hit at that priority), evaluates
   <numL><op><numR>, splices the numeric result back into the working copy,
   and loops until a single number remains; that number is returned.
   Priority order (low to high): AND/OR < comparisons < +- < *\/ < ^. */
static double evaluate_arithmetic_1(SOAP_codex_t *codex, char *expr_orig){
  long cnt,priority,anchor,anchorL,anchorR;
  char *expr;
  char *strL,*strR;
  double tmp,res,numL,numR;
  char *res_str;
  bool SINGLENUM;
  int eos=EOS;
  res_str=(char *)malloc(maxnumlen*sizeof(char));
  expr=(char *)malloc(((long)strlen(expr_orig)+3)*sizeof(char));
  strL=(char *)malloc(sizeof(char));
  strR=(char *)malloc(sizeof(char));
  /* work on a private copy; expr_orig is left untouched */
  strcpy(expr,expr_orig);
  //if (expr[0]=='-' || expr[0]=='+') SOAP_strins("0.0",&expr,0); //???
  // do {
  //   strrep(expr, "+-", "-");
  //   strrep(expr, "--", "+");
  // } while(strstr(expr,"--")!=NULL || strstr(expr,"+-")!=NULL);
  /* "--x" at the start would otherwise confuse the operator scan */
  if (expr[0]=='-' && expr[1]=='-') {
    SOAP_strins("0.0",&expr,0);
  }
  replace_NOT_operator(codex,&expr);
  SINGLENUM=FALSE;
  do {
    /* pass 1: locate the operator to reduce; cnt starts at 1 so a leading
       sign is never treated as a binary operator */
    cnt=0;
    priority=-2;
    anchor=0;
    do {
      cnt++;
      if (is_operator(expr,cnt)) {
        if (( expr[cnt]==AND || expr[cnt]==OR) && priority<-1) {
          priority=-1;
          anchor=cnt;
        }
        if (( expr[cnt]==GT || expr[cnt]==GEQ || expr[cnt]==LT || expr[cnt]==LEQ
           || expr[cnt]==EQ || expr[cnt]==NEQ ) && priority<0) {
          priority=0;
          anchor=cnt;
        }
        if ((expr[cnt]=='-' || expr[cnt]=='+') && priority<1) {
          priority=1;
          anchor=cnt;
        }
        if ((expr[cnt]=='*' || expr[cnt]=='/') && priority<2) {
          priority=2;
          anchor=cnt;
        }
        if ((expr[cnt]=='^') && priority<3) {
          priority=3;
          anchor=cnt;
        }
      }
    } while (expr[cnt]!=EOS);
    if (anchor!=0) {
      /* pass 2: extract the left operand [anchorL..anchor-1] and the right
         operand [anchor+1..anchorR] around the chosen operator */
      anchorL=anchor;
      do {
        anchorL--;
      } while ((!is_operator(expr,anchorL)) && (anchorL!=0));
      if (anchorL!=0) anchorL++; //???
      anchorR=anchor;
      do {
        anchorR++;
      } while ((!is_operator(expr,anchorR)) && (expr[anchorR]!=EOS));
      anchorR--;
      strL=(char *)realloc(strL,(anchor-anchorL+3)*sizeof(char));
      strR=(char *)realloc(strR,(anchorR-anchor+3)*sizeof(char));
      for (cnt=anchorL; cnt<anchor; cnt++) strL[cnt-anchorL]=expr[cnt];
      strL[anchor-anchorL]=EOS;
      for (cnt=anchor+1; cnt<=anchorR; cnt++) strR[cnt-anchor-1]=expr[cnt];
      strR[anchorR-anchor]=EOS;
      if (sscanf(strL,"%lg%n",&numL,&eos)!=1 || strL[eos]!=EOS) {
        SOAP_fatal_error(codex,"Problem reading expression >%s<.",strL);
      }
      if (sscanf(strR,"%lg%n",&numR,&eos)!=1 || strR[eos]!=EOS) {
        SOAP_fatal_error(codex,"Problem reading expression >%s<.",strR);
      }
      res=0.0e0; /* to avoid compiler warning */
      /* comparisons / logicals yield 1.0 (true) or 0.0 (false) */
      switch (expr[anchor]) {
        case OR:
          if (is_logical(codex,longfromdouble(numL)) || is_logical(codex,longfromdouble(numR))) res=1.0e0;
            else res=0.0e0;
        break;
        case AND:
          if (is_logical(codex,longfromdouble(numL)) && is_logical(codex,longfromdouble(numR))) res=1.0e0;
            else res=0.0e0;
        break;
        case NEQ:
          if (numL!=numR) res=1.0e0; else res=0.0e0;
        break;
        case EQ:
          if (numL==numR) res=1.0e0; else res=0.0e0;
        break;
        case GEQ:
          if (numL>=numR) res=1.0e0; else res=0.0e0;
        break;
        case LEQ:
          if (numL<=numR) res=1.0e0; else res=0.0e0;
        break;
        case LT:
          if (numL<numR) res=1.0e0; else res=0.0e0;
        break;
        case GT:
          if (numL>numR) res=1.0e0; else res=0.0e0;
        break;
        case '-': res=numL-numR; break;
        case '+': res=numL+numR; break;
        case '*': res=numL*numR; break;
        case '/': res=numL/numR; break;
        case '^': res=pow(numL,numR); break;
      }
      /* splice the result back in place of "<numL><op><numR>" */
      sprintf(res_str,DOUBLEFORMAT,res);
      SOAP_strcut(anchorL,anchorR,expr);
      SOAP_strins(res_str,&expr,anchorL);
    } else {
      SINGLENUM=TRUE;
    }
  } while (!SINGLENUM);
  if (sscanf(expr,"%lg%n",&tmp,&eos)!=1 || expr[eos]!=EOS) {
    SOAP_fatal_error(codex,"Problem reading expression >%s<.",expr);
  }
  free(strL);
  free(strR);
  free(expr);
  free(res_str);
  return(tmp);
}


/* evaluate a string expression in which there are parentheses
   "(10*50)/40^10.0E7" */
/* Innermost-first: repeatedly finds the deepest "(...)" (last '(' before the
   first ')'), evaluates its contents with evaluate_arithmetic_1(), and
   substitutes the numeric result until no parentheses remain. Fatal error on
   unbalanced parentheses. */
double SOAP_evaluate_arithmetic(SOAP_codex_t *codex, char *expr_orig){
  char *expr;
  char *expr2;
  char *res_str;
  long cnt,cnt2,anchorL,anchorR;
  double res;
  bool STILLSOME;
  res_str=(char *)malloc(maxnumlen*sizeof(char));
  expr=(char *)malloc(((long)strlen(expr_orig)+3)*sizeof(char));
  expr2=(char *)malloc(((long)strlen(expr_orig)+3)*sizeof(char));
  strcpy(expr,expr_orig);
  /* first check if parentheses are balanced */
  cnt2=0;
  for (cnt=0; cnt<(long)strlen(expr); cnt++){
    if (expr[cnt]=='(') cnt2++;
    if (expr[cnt]==')') cnt2--;
  }
  if (cnt2!=0) {
    SOAP_fatal_error(codex,"Parentheses not balanced: >%s<.",expr);
  }
  do {
    /* find expr2, the expression in brackets which needs to be evaluated
       first from expr and replace its value in expr */
    STILLSOME=FALSE;
    cnt=0;
    anchorL=0;
    do {
      if (expr[cnt]=='(') {
        anchorL=cnt+1;
        STILLSOME=TRUE;
      }
      cnt++;
    } while (expr[cnt]!=')' && expr[cnt]!=EOS);
    anchorR=cnt-1;
    expr2=(char *)realloc(expr2,((long)strlen(expr)+3)*sizeof(char));
    for (cnt=anchorL; cnt<=anchorR; cnt++){
      expr2[cnt-anchorL]=expr[cnt];
    }
    expr2[anchorR-anchorL+1]=EOS;
    /* with no '(' found, anchorL==0 and the whole remaining string is
       evaluated as the final, parenthesis-free expression */
    res=evaluate_arithmetic_1(codex,expr2);
    if (STILLSOME) {
      /* cut "(...)" including both parentheses, insert the number */
      SOAP_strcut(anchorL-1,anchorR+1,expr);
      sprintf(res_str,DOUBLEFORMAT,res);
      SOAP_strins(res_str,&expr,anchorL-1);
    }
  } while (STILLSOME);
  free(expr);
  free(expr2);
  free(res_str);
  return(res);
}


/* is character a valid one for variables (or functions)? */
/* Accepted: [a-zA-Z0-9], '_', '.', and the array-index brackets '[' ']'. */
static bool is_part_of_var(char chr){
  bool ans;
  ans=FALSE;
  if (chr>='a' && chr<='z') ans=TRUE;
  if (chr>='A' && chr<='Z') ans=TRUE;
  if (chr>='0' && chr<='9') ans=TRUE;
  if (chr=='_' || chr=='.' || chr=='[' || chr==']') ans=TRUE;
  return(ans);
}


/* returns TRUE on success, FALSE otherwise
   find word str in expr following cnts
   anchorL and anchorR represent the boundaries of the word */
/* A "word" is an occurrence of `word` NOT flanked on either side by a
   variable character (so "pi" does not match inside "spin"). Keeps
   retrying find_str_in_str() from the next offset until a whole-word
   hit or the end of the string. */
static bool find_word_in_string(char *expr, char *word, long *anchorL, long *anchorR) {
  bool WORD,STR;
  long cnts;
  cnts=0;
  do {
    STR=find_str_in_str(expr, word, cnts, anchorL, anchorR);
    WORD=TRUE;
    if (STR) {
      if (is_part_of_var(expr[*anchorR+1])) WORD=FALSE;
      if (*anchorL!=0) {
        if (is_part_of_var(expr[*anchorL-1])) WORD=FALSE;
      }
    } else {
      WORD=FALSE;
    }
    if (!WORD) cnts++;
  } while (!WORD && expr[cnts]!=EOS);
  return WORD;
}


/* substitute the expressions contained in _all array elements (between [ and ])
   to their corresponding values */
/* For each top-level "[...]" in *name, evaluates the enclosed expression via
   SOAP_substitute_expression() and replaces it in place, so e.g.
   "a[1+2]" becomes "a[3]". Nested brackets are matched by depth counting.
   Fatal error on a missing closing ']'. */
static void substitute_array_elements(char **name, SOAP_codex_t *codex){
  long cnt,brackets;
  long anchorL,anchorR;
  char *expr;
  bool ENDREACHED;
  expr=(char *)malloc(sizeof(char));
  anchorL=0;
  ENDREACHED=FALSE;
  while (!ENDREACHED){
    /* advance to the next '[' (or the end of the string) */
    brackets=0;
    do {
      anchorL++;
      if ((*name)[anchorL]=='[') brackets++;
      if ((*name)[anchorL]==EOS) ENDREACHED=TRUE;
    } while (brackets==0 && !ENDREACHED);
    anchorL++;
    if (!ENDREACHED) {
      /* find the matching ']' at the same nesting depth */
      cnt=anchorL;
      do {
        cnt++;
        if ((*name)[cnt]=='[') brackets++;
        if ((*name)[cnt]==']') brackets--;
      } while ( brackets!=0 && (*name)[cnt]!=EOS );
      if ((*name)[cnt]==EOS) {
        SOAP_fatal_error(codex,"Missing end of array character ] "
                         "in string >%s<.",*name);
      }
      anchorR=cnt-1;
      expr=(char *)realloc(expr,(anchorR-anchorL+3)*sizeof(char));
      for (cnt=anchorL; cnt<=anchorR; cnt++)
        expr[cnt-anchorL]=(*name)[cnt];
      expr[anchorR-anchorL+1]=EOS;
      SOAP_substitute_expression(&expr, codex);
      SOAP_strcut(anchorL,anchorR,*name);
      SOAP_strins(expr,name,anchorL);
    }
  }
  free(expr);
}


/* substitute variables in string *expr */
/* Two stages: (1) the predefined constants pi, TRUE/FALSE, YES/NO,
   EXIT_SUCCESS/EXIT_FAILURE; (2) the user variables stored in codex->vars
   (after first resolving any [index] expressions in *expr). Each variable
   name is matched as a whole word and replaced by its stored value string. */
static void substitute_vars(char **expr, SOAP_codex_t *codex){
  long cnt,anchorL,anchorR;
  char *varname,*varvalue;
  /* need to set anchorL and anchorR to 0 to get rid of gcc warning */
  anchorL=0;
  anchorR=0;
  varname=(char *)malloc(maxnumlen*sizeof(char));
  varvalue=(char *)malloc(maxnumlen*sizeof(char));
  /* predefined variables first */
  for (cnt=0; cnt<7; cnt++){
    switch (cnt) {
      case 0:
        sprintf(varname,"pi");
        sprintf(varvalue,DOUBLEFORMAT,pi);
      break;
      case 1:
        sprintf(varname,"TRUE");
        sprintf(varvalue,"1");
      break;
      case 2:
        sprintf(varname,"FALSE");
        sprintf(varvalue,"0");
      break;
      case 3:
        sprintf(varname,"YES");
        sprintf(varvalue,"1");
      break;
      case 4:
        sprintf(varname,"NO");
        sprintf(varvalue,"0");
      break;
      case 5:
        sprintf(varname,"EXIT_SUCCESS");
        sprintf(varvalue,"0");
      break;
      case 6:
        sprintf(varname,"EXIT_FAILURE");
        sprintf(varvalue,"1");
      break;
    }
    while (find_word_in_string(*expr, varname, &anchorL, &anchorR)){
      SOAP_strcut(anchorL,anchorR,*expr);
      SOAP_strins(varvalue,expr,anchorL);
    }
  }
  /* user defined variables second */
  substitute_array_elements(expr,codex);
  cnt=0;
  /* codex->vars is a NULL-name-terminated array */
  if (codex->vars[0].name!=NULL) {
    do {
      while (find_word_in_string(*expr, codex->vars[cnt].name, &anchorL, &anchorR)) {
        SOAP_strcut(anchorL,anchorR,*expr);
        SOAP_strins(codex->vars[cnt].value,expr,anchorL);
      }
      cnt++;
    } while (codex->vars[cnt].name!=NULL);
  }
  free(varname);
  free(varvalue);
}


/* get the nth argument and store it in *expr; arguments are counted from 0.
*expr must already have been malloc'ed*/
/* Arguments are comma-separated at depth 0: commas inside a "(...)" group or
   inside a "..." string do NOT split. First skips n separators to reach the
   start of argument n (fatal error if fewer than n+1 arguments exist), then
   copies it into *expr (grown char-by-char via realloc), writing its
   [anchorL..anchorR] span back to the caller. */
static void get_argum_straight_0(SOAP_codex_t *codex, char **expr, char *argum, long n,
                                 long *anchorL, long *anchorR){
  long cnt,parentheses;
  bool INSTRING;
  INSTRING=FALSE;
  *anchorL=0;
  /* skip the first n top-level commas */
  for (cnt=0; cnt<n; cnt++){
    parentheses=0;
    do {
      if (argum[*anchorL]=='"') INSTRING=!INSTRING;
      if (argum[*anchorL]=='(' && !INSTRING) parentheses++;
      if (argum[*anchorL]==')' && !INSTRING) parentheses--;
      if (argum[*anchorL]==EOS) {
        SOAP_fatal_error(codex,"Reached end of string while trying to grab argument#%ld from string "
                         ">%s<.",n+1,argum);
      }
      (*anchorL)++;
    } while (!(argum[*anchorL]==',' && parentheses==0 && !INSTRING));
    (*anchorL)++;
  }
  /* copy up to the next top-level comma or EOS */
  cnt=*anchorL;
  parentheses=0;
  INSTRING=FALSE;
  do {
    if (argum[cnt]=='"') INSTRING=!INSTRING;
    if (argum[cnt]=='(' && !INSTRING) parentheses++;
    if (argum[cnt]==')' && !INSTRING) parentheses--;
    (*expr)=(char *)realloc(*expr,(cnt-(*anchorL)+3)*sizeof(char));
    (*expr)[cnt-(*anchorL)]=argum[cnt];
    cnt++;
  } while (!(argum[cnt]==',' && parentheses==0 && !INSTRING) && argum[cnt]!=EOS);
  (*anchorR)=cnt-1;
  (*expr)[(*anchorR)-(*anchorL)+1]=EOS;
}


/* get the nth argument and store it in *expr; arguments are counted from 0.
   *expr must already have been malloc'ed*/
/* Convenience wrapper around get_argum_straight_0() that discards the
   anchor positions. */
void SOAP_get_argum_straight(SOAP_codex_t *codex, char **expr, char *argum, long n){
  long anchorR,anchorL;
  get_argum_straight_0(codex,expr,argum,n,&anchorL,&anchorR);
}


/* get the nth argument and store what is in between the quotes of the string in *expr;
   arguments are counted from 0.
*expr must already have been malloc'ed*/
/* Fetches argument n and strips the surrounding double quotes; fatal error
   if the argument does not both start and end with '"'. */
void SOAP_get_argum_string(SOAP_codex_t *codex, char **expr, char *argum, long n){
  long anchorR,anchorL;
  get_argum_straight_0(codex,expr,argum,n,&anchorL,&anchorR);
  if ((*expr)[0]=='"') {
    SOAP_strcut(0, 0, *expr);
  } else {
    SOAP_fatal_error(codex,"String does not start with \".");
  }
  if ((*expr)[strlen(*expr)-1]=='"') {
    SOAP_strcut(strlen(*expr)-1, strlen(*expr)-1, *expr);
  } else {
    SOAP_fatal_error(codex,"String does not end with \".");
  }
}


/* get the nth argument; arguments are counted from 0*/
/* Parses argument n as a double; fatal error unless the WHOLE argument is a
   valid float (checked with %n + trailing-EOS). */
double SOAP_get_argum_double(SOAP_codex_t *codex, char *argum, long n){
  char *expr;
  double tmp;
  int eos = EOS;
  expr=(char *)malloc(sizeof(char));
  SOAP_get_argum_straight(codex,&expr, argum, n);
  if (sscanf(expr,"%lg%n",&tmp,&eos)!=1 || expr[eos]!=EOS){
    SOAP_fatal_error(codex,"\"%s\" is not a float.",expr);
  }
  free(expr);
  return(tmp);
}


/* get the nth argument; arguments are counted from 0*/
/* Parses argument n as a long integer; fatal error on any trailing junk. */
long SOAP_get_argum_long(SOAP_codex_t *codex, char *argum, long n){
  char *expr;
  long tmp;
  int eos = EOS;
  expr=(char *)malloc(sizeof(char));
  SOAP_get_argum_straight(codex,&expr, argum, n);
  if (sscanf(expr,"%ld%n",&tmp,&eos)!=1 || expr[eos]!=EOS){
    SOAP_fatal_error(codex,"\"%s\" is not an integer.",expr);
  }
  free(expr);
  return(tmp);
}


/* get the nth argument; arguments are counted from 0*/
/* Parses argument n as a boolean (0 or 1); any other value is fatal.
   NOTE(review): parameter n is declared bool but used as an argument index —
   looks like it should be long; confirm against callers. */
long SOAP_get_argum_bool(SOAP_codex_t *codex, char *argum, bool n){
  char *expr;
  long tmp;
  int eos = EOS;
  expr=(char *)malloc(sizeof(char));
  SOAP_get_argum_straight(codex,&expr, argum, n);
  if (sscanf(expr,"%ld%n",&tmp,&eos)!=1 || expr[eos]!=EOS){
    SOAP_fatal_error(codex,"\"%s\" is not a boolean.",expr);
  }
  if (tmp<0 || tmp>1) {
    SOAP_fatal_error(codex,"\"%s\" is not a boolean.",expr);
  }
  free(expr);
  return(tmp);
}


/* Evaluate the built-in interpreter functions. `function` is the name,
   *argum its (unsubstituted) argument string; the result is written as a
   numeric string into *returnstr (realloc'ed as needed). Unknown names
   leave *returnstr untouched. Covers: one-argument math functions
   (rad/deg/sin/.../atanh), variadic min/max, random, mod, krodelta,
   defined, and cubic-spline interpolation. */
static void functions_builtin(char *function, char **argum, char **returnstr, SOAP_codex_t *codex){
  double tmp,returnval;
  long functionnum,numargum,cnt;
  int eos=EOS;
  /* map the one-argument math functions to an index, 0 = not one of them */
  functionnum=0;
  if (strcmp(function,"rad")==0) functionnum=1;
  if (strcmp(function,"deg")==0) functionnum=2;
  if (strcmp(function,"sin")==0) functionnum=3;
  if (strcmp(function,"cos")==0) functionnum=4;
  if (strcmp(function,"tan")==0) functionnum=5;
  if (strcmp(function,"asin")==0) functionnum=6;
  if (strcmp(function,"acos")==0) functionnum=7;
  if (strcmp(function,"atan")==0) functionnum=8;
  if (strcmp(function,"sqrt")==0) functionnum=9;
  if (strcmp(function,"sqr")==0) functionnum=10;
  if (strcmp(function,"exp")==0) functionnum=11;
  if (strcmp(function,"ln")==0) functionnum=12;
  if (strcmp(function,"round")==0) functionnum=13;
  if (strcmp(function,"floor")==0) functionnum=14;
  if (strcmp(function,"abs")==0) functionnum=15;
  if (strcmp(function,"sinh")==0) functionnum=16;
  if (strcmp(function,"cosh")==0) functionnum=17;
  if (strcmp(function,"tanh")==0) functionnum=18;
  if (strcmp(function,"asinh")==0) functionnum=19;
  if (strcmp(function,"acosh")==0) functionnum=20;
  if (strcmp(function,"atanh")==0) functionnum=21;
  if (functionnum>0 && functionnum<22) {
    /* single numeric argument: substitute, parse, apply */
    SOAP_substitute_all_argums(argum, codex);
    if (sscanf(*argum,"%lg%n",&tmp,&eos)!=1 || (*argum)[eos]!=EOS)
      SOAP_fatal_error(codex,"Problem evaluating expression >%s<.",*argum);
    *returnstr=(char *)realloc(*returnstr,maxnumlen*sizeof(char));
    returnval=0.0e0; /* to avoid compiler warning only */
    switch (functionnum) {
      case 1: returnval=rad(tmp); break;
      case 2: returnval=deg(tmp); break;
      case 3: returnval=sin(tmp); break;
      case 4: returnval=cos(tmp); break;
      case 5: returnval=tan(tmp); break;
      case 6: returnval=asin(tmp); break;
      case 7: returnval=acos(tmp); break;
      case 8: returnval=atan(tmp); break;
      case 9: returnval=sqrt(tmp); break;
      case 10: returnval=sqr(tmp); break;
      case 11: returnval=exp(tmp); break;
      case 12: returnval=log(tmp); break;
      case 13: returnval=round(tmp); break;
      case 14: returnval=floor(tmp); break;
      case 15: returnval=fabs(tmp); break;
      case 16: returnval=sinh(tmp); break;
      case 17: returnval=cosh(tmp); break;
      case 18: returnval=tanh(tmp); break;
      case 19: returnval=asinh(tmp); break;
      case 20: returnval=acosh(tmp); break;
      case 21: returnval=atanh(tmp); break;
    }
    sprintf(*returnstr,DOUBLEFORMAT,returnval);
  }
  if (strcmp(function,"min")==0) {
    /* variadic minimum over all arguments */
    SOAP_substitute_all_argums(argum, codex);
    numargum=SOAP_number_argums(*argum);
    returnval=SOAP_get_argum_double(codex,*argum,0);
    for (cnt=1; cnt<numargum; cnt++) returnval=min(returnval,SOAP_get_argum_double(codex,*argum,cnt));
    *returnstr=(char *)realloc(*returnstr,maxnumlen*sizeof(char));
    sprintf(*returnstr,DOUBLEFORMAT,returnval);
  }
  if (strcmp(function,"max")==0) {
    /* variadic maximum over all arguments */
    SOAP_substitute_all_argums(argum, codex);
    numargum=SOAP_number_argums(*argum);
    returnval=SOAP_get_argum_double(codex,*argum,0);
    for (cnt=1; cnt<numargum; cnt++) returnval=max(returnval,SOAP_get_argum_double(codex,*argum,cnt));
    *returnstr=(char *)realloc(*returnstr,maxnumlen*sizeof(char));
    sprintf(*returnstr,DOUBLEFORMAT,returnval);
  }
  if (strcmp(function,"random")==0) {
    /* uniform random double in [arg0, arg1] */
    SOAP_substitute_all_argums(argum, codex);
    returnval=random_double(SOAP_get_argum_double(codex,*argum,0),SOAP_get_argum_double(codex,*argum,1));
    *returnstr=(char *)realloc(*returnstr,maxnumlen*sizeof(char));
    sprintf(*returnstr,DOUBLEFORMAT,returnval);
  }
  if (strcmp(function,"mod")==0) {
    SOAP_substitute_all_argums(argum, codex);
    returnval=mod(longfromdouble(SOAP_get_argum_double(codex,*argum,0)),longfromdouble(SOAP_get_argum_double(codex,*argum,1)));
    *returnstr=(char *)realloc(*returnstr,maxnumlen*sizeof(char));
    sprintf(*returnstr,DOUBLEFORMAT,returnval);
  }
  if (strcmp(function,"krodelta")==0) {
    /* Kronecker delta of two (rounded) integer arguments */
    SOAP_substitute_all_argums(argum, codex);
    returnval=krodelta(longfromdouble(SOAP_get_argum_double(codex,*argum,0)),longfromdouble(SOAP_get_argum_double(codex,*argum,1)));
    *returnstr=(char *)realloc(*returnstr,maxnumlen*sizeof(char));
    sprintf(*returnstr,DOUBLEFORMAT,returnval);
  }
  if (strcmp(function,"defined")==0) {
    /* "1" if the (already-substituted) argument parses as a pure number;
       an undefined variable would have been left unsubstituted */
    *returnstr=(char *)realloc(*returnstr,maxnumlen*sizeof(char));
    if (sscanf(*argum,"%lg%n",&tmp,&eos)==1 && (*argum)[eos]==EOS) strcpy(*returnstr,"1");
      else strcpy(*returnstr,"0");
  }
  if (strcmp(function,"spline")==0) {
    /* spline(x0,f0, x1,f1, ..., xN-1,fN-1, x): cubic-spline interpolation
       of f at x from >=4 strictly increasing data points */
    long N,n;
    double *f,*b,*x;
    double thisx;
    N=SOAP_number_argums(*argum);
    if (mod(N-1,2)!=0) SOAP_fatal_error(codex,"Number of arguments within spline must be an odd number.");
    N=(N-1)/2;
    if (N<4) SOAP_fatal_error(codex,"Number of data points supplied within spline must be at least 4.");
    x=(double *)malloc(N*sizeof(double));
    f=(double *)malloc(N*sizeof(double));
    b=(double *)malloc(N*sizeof(double));
    for (n=0; n<N; n++) {
      SOAP_substitute_argum(argum, n*2, codex);
      x[n]=SOAP_get_argum_double(codex, *argum, n*2);
      SOAP_substitute_argum(argum, n*2+1, codex);
      f[n]=SOAP_get_argum_double(codex, *argum, n*2+1);
    }
    /* check if data points are valid (x[n+1]>x[n]) */
    for (n=0; n<N-1; n++){
      if (x[n+1]<=x[n]) SOAP_fatal_error(codex,
        "Data points supplied to spline must be such that x[i+1]>x[i].");
    }
    SOAP_substitute_argum(argum, N*2, codex);
    thisx=SOAP_get_argum_double(codex, *argum, N*2);
    /* check if point is out of range */
    if (thisx<x[0] || thisx>x[N-1]) SOAP_fatal_error(codex,
      "Ensure that x lies between x[0] and x[N].");
    EXM_find_spline(N, x, f, b);
    *returnstr=(char *)realloc(*returnstr,maxnumlen*sizeof(char));
    sprintf(*returnstr,DOUBLEFORMAT,EXM_f_from_spline(N, x, f, b, thisx));
    free(x);
    free(b);
    free(f);
  }
}


/* Replace every innermost function call "name(args)" in *expr by its value.
   The call site is found as a '(' immediately preceded by a variable
   character; user-supplied functions (codex->function) get first shot, then
   the built-ins. Loops until no call remains. */
static void substitute_functions(char **expr, SOAP_codex_t *codex){
  long cnt,anchorLL,anchorL,anchorR,parentheses;
  bool FOUND;
  char *function,*argum,*returnstr;
  returnstr=(char *)malloc(sizeof(char));
  do {
    /* find the LAST "name(" occurrence: anchorL -> its '(' */
    cnt=0;
    anchorL=0;
    FOUND=FALSE;
    do {
      cnt++;
      if ((*expr)[cnt]=='(' && is_part_of_var((*expr)[cnt-1])) {
        FOUND=TRUE;
        anchorL=cnt;
      }
    } while ((*expr)[cnt]!=EOS);
    if (FOUND) {
      /* back up over the function name: anchorLL -> its first char */
      cnt=anchorL;
      do {
        cnt--;
      } while (cnt>0 && is_part_of_var((*expr)[cnt-1]) );
      anchorLL=cnt;
      /* find the matching ')': anchorR */
      cnt=anchorL;
      parentheses=0;
      do {
        if ((*expr)[cnt]=='"') SOAP_fatal_error(codex,"Found a quote inside an expression.");
        if ((*expr)[cnt]=='(') parentheses++;
        if ((*expr)[cnt]==')') parentheses--;
        cnt++;
      } while (parentheses!=0);
      anchorR=cnt-1;
      function=(char *)malloc((anchorL-anchorLL+4)*sizeof(char));
      argum=(char *)malloc((anchorR-anchorL+4)*sizeof(char));
      for (cnt=anchorLL; cnt<anchorL; cnt++) function[cnt-anchorLL]=(*expr)[cnt];
      function[anchorL-anchorLL]=EOS;
      for (cnt=anchorL+1; cnt<anchorR; cnt++) argum[cnt-anchorL-1]=(*expr)[cnt];
      argum[anchorR-anchorL-1]=EOS;
      /* default result is the bare function name (i.e. unknown names pass
         through unchanged) */
      returnstr=(char *)realloc(returnstr,(3+(long)strlen(function))*sizeof(char));
      strcpy(returnstr,function);
      if (codex->FUNCTION) (codex->function)(function,&argum,&returnstr,codex);
      functions_builtin(function, &argum, &returnstr, codex);
      SOAP_strcut(anchorLL,anchorR,*expr);
      SOAP_strins(returnstr,expr,anchorLL);
      free(argum);
      free(function);
    }
  } while (FOUND);
  free(returnstr);
}


/* TRUE when the double survives a round-trip through long unchanged,
   i.e. it holds an exact integral value representable as long. */
bool SOAP_is_double_a_long(double expr_double){
  double expr_double2;
  long expr_long;
  bool RET;
  expr_long=(long)expr_double;
  expr_double2=(double)expr_long;
  if (expr_double2!=expr_double) RET=FALSE; else RET=TRUE;
  return(RET);
}


/* evaluate the expr **expr and substitute it for the number it stands for.
   This includes evaluating the variables, the functions and finally
   the arithmetic */
/* Pipeline: substitute_vars -> substitute_functions ->
   SOAP_evaluate_arithmetic; the final number is written back into *expr,
   formatted as "%ld" when integral, otherwise with DOUBLEFORMAT. */
void SOAP_substitute_expression(char **expr, SOAP_codex_t *codex){
  double expr_double;
  char *expr_str;
  expr_str=(char *)malloc(maxnumlen*sizeof(char));
  substitute_vars(expr, codex);
  substitute_functions(expr, codex);
  expr_double=SOAP_evaluate_arithmetic(codex,*expr);
  (*expr)[0]=EOS;
  if (!SOAP_is_double_a_long(expr_double)){
    sprintf(expr_str,DOUBLEFORMAT,expr_double);
    SOAP_strins(expr_str,expr,0);
  } else {
    sprintf(expr_str,"%ld",(long)expr_double);
    SOAP_strins(expr_str,expr,0);
  }
  free(expr_str);
}


/* in the given *expr, substitute the string comparisons, like
   "bernard"=="jeny" would be evaluated to false, and substituted by 0 */
/* Scans for a comparison operator squeezed between two quoted strings
   ("..."op"..."); compares them with strcmp() and replaces the whole
   "left"op"right" span by "1" or "0". Comparison operators are the
   project constants EQ/NEQ/LT/GT/GEQ/LEQ produced by clean_code(). */
void SOAP_substitute_string_arithmetic(char **expr, SOAP_codex_t *codex){
  char *leftstring;
  char *rightstring;
  long cnt,anchor,anchorL,anchorR;
  bool INSTRING;
  int strcmp_ret;
  char newexpr[2];
  /* shortest possible form is ""op"" -> 5 chars, so skip tiny inputs */
  if (strlen(*expr)>4) {
    anchor=0;
    if ((*expr)[0]=='"') INSTRING=TRUE; else INSTRING=FALSE;
    do {
      anchor++;
      if ((*expr)[anchor]=='"') INSTRING=!INSTRING;
      if ( ((*expr)[anchor]==EQ || (*expr)[anchor]==NEQ || (*expr)[anchor]==LT
         || (*expr)[anchor]==GT || (*expr)[anchor]==GEQ || (*expr)[anchor]==LEQ)
         && (*expr)[anchor-1]=='"' && (*expr)[anchor+1]=='"' && !INSTRING) {
        /* first find left string */
        anchorL=anchor-1;
        do {
          anchorL--;
        } while ((*expr)[anchorL]!='"' && anchorL>0);
        if ((*expr)[anchorL]!='"') SOAP_fatal_error(codex,"Problem in subroutine SOAP_StringArith(2).");
        leftstring=(char *)malloc(sizeof(char)*(anchor-anchorL+2));
        for (cnt=anchorL+1; cnt<anchor-1; cnt++){
          leftstring[cnt-anchorL-1]=(*expr)[cnt];
        }
        leftstring[anchor-anchorL-2]=EOS;
        /* find right string */
        anchorR=anchor+1;
        do {
          (anchorR)++;
        } while ((*expr)[anchorR]!='"' && (*expr)[anchorR]!=EOS);
        if ((*expr)[anchorR]!='"') SOAP_fatal_error(codex,"Problem in subroutine SOAP_StringArith(3).");
        rightstring=(char *)malloc(sizeof(char)*(anchorR-anchor+2));
        for (cnt=anchor+2; cnt<anchorR; cnt++){
          rightstring[cnt-anchor-2]=(*expr)[cnt];
        }
        rightstring[anchorR-anchor-2]=EOS;
        /* printf("leftstring=>%s<  rightstring=>%s<\n",leftstring,rightstring); */
        /* compare the two strings and find newexpr */
        strcmp_ret=strcmp(leftstring,rightstring);
        switch ((*expr)[anchor]) {
          case EQ:
            if (strcmp_ret==0) (newexpr)[0]='1'; else (newexpr)[0]='0';
          break;
          case NEQ:
            if (strcmp_ret!=0) (newexpr)[0]='1'; else (newexpr)[0]='0';
          break;
          case LT:
            if (strcmp_ret<0) (newexpr)[0]='1'; else (newexpr)[0]='0';
          break;
          case GT:
            if (strcmp_ret>0) (newexpr)[0]='1'; else (newexpr)[0]='0';
          break;
          case LEQ:
            if (strcmp_ret<=0) (newexpr)[0]='1'; else (newexpr)[0]='0';
          break;
          case GEQ:
            if (strcmp_ret>=0) (newexpr)[0]='1'; else (newexpr)[0]='0';
          break;
        }
        (newexpr)[1]=EOS;
        SOAP_strcut(anchorL,anchorR,*expr);
        SOAP_strins(newexpr, expr, anchorL);
        /* resume scanning at the substitution point */
        anchor=anchorL;
        free(leftstring);
        free(rightstring);
      }
    } while ((*expr)[anchor+1]!=EOS);
  }
}


/* evaluate the expr **expr and substitute it for the number it stands for.
This includes evaluating the variables, the functions and finally the arithmetic of all sub-expressions outside of the strings. Sub-expressions which are strings are left untouched. If some sub-expressions are strings, then the final expression is made into a string*/ void SOAP_substitute_expression_including_strings(char **expr, SOAP_codex_t *codex){ long anchorL, anchorR, cnt, pass; char *exprtmp; bool QUOTEFOUND,INSTRING; for (pass=1; pass<=2; pass++){ if (pass==2) SOAP_substitute_string_arithmetic(expr,codex); anchorL=0; do { /* find the anchorL and anchorR corresponding to non-string */ anchorL--; INSTRING=FALSE; do { anchorL++; if ((*expr)[anchorL]=='"') INSTRING=!INSTRING; } while (INSTRING || (*expr)[anchorL]=='"'); anchorR=anchorL; if ((*expr)[anchorR]!=EOS){ do { anchorR++; } while ((*expr)[anchorR]!='"' && (*expr)[anchorR]!=EOS); anchorR--; } /* substitute variables or expressions*/ if ((*expr)[anchorL]!=EOS) { exprtmp=(char *)malloc(sizeof(char)*(anchorR-anchorL+3)); for (cnt=anchorL; cnt<=anchorR; cnt++) exprtmp[cnt-anchorL]=(*expr)[cnt]; exprtmp[anchorR-anchorL+1]=EOS; if (pass==1) substitute_vars(&exprtmp, codex); if (pass==2) SOAP_substitute_expression(&exprtmp, codex); SOAP_strcut(anchorL,anchorR,*expr); SOAP_strins(exprtmp, expr, anchorL); anchorL+=strlen(exprtmp); free(exprtmp); } } while ((*expr)[anchorL]!=EOS); } /* clean up _all quotes and, if quotes were found, add quotes at end and start of *expr */ anchorL=0; QUOTEFOUND=FALSE; do { if ((*expr)[anchorL]=='"') { SOAP_strcut(anchorL,anchorL,*expr); anchorL--; QUOTEFOUND=TRUE; } anchorL++; } while((*expr)[anchorL]!=EOS); if (QUOTEFOUND) { SOAP_strins("\"", expr, 0); SOAP_strins("\"", expr, strlen(*expr)); } } /* sub the expression located in nth argument with SOAP_substitute_expression; arguments are counted from 0*/ void SOAP_substitute_argum(char **argum, long n, SOAP_codex_t *codex){ char *expr; long anchorL,anchorR; expr=(char *)malloc((long)(strlen(*argum)+3)*sizeof(char)); 
get_argum_straight_0(codex,&expr, *argum, n, &anchorL, &anchorR); SOAP_substitute_expression_including_strings(&expr, codex); SOAP_strcut(anchorL,anchorR,*argum); SOAP_strins(expr,argum,anchorL); free(expr); } /* returns the number of arguments */ long SOAP_number_argums(char *argum){ long cnt,commas,parentheses; bool INSTRING; if (strlen(argum)==0) { commas=0; } else { cnt=0; commas=0; parentheses=0; INSTRING=FALSE; do { if (argum[cnt]=='"') INSTRING=!INSTRING; if (argum[cnt]==',' && parentheses==0 && !INSTRING) commas++; if (argum[cnt]=='(' && !INSTRING) parentheses++; if (argum[cnt]==')' && !INSTRING) parentheses--; cnt++; } while(argum[cnt]!=EOS); commas++; } return(commas); } void SOAP_substitute_all_argums(char **argum, SOAP_codex_t *codex){ long cnt,numargum; numargum=SOAP_number_argums(*argum); for (cnt=0; cnt<numargum; cnt++) SOAP_substitute_argum(argum, cnt, codex); } /* static void ShowCode(char *code){ long cnt; printf("code starts here -->"); for (cnt=0; cnt<(long)strlen(code); cnt++){ printf("%c",code[cnt]); } printf("<-- code ended there\n"); fflush(stdout); }*/ static void delete_character(long cnt, char *code){ long cnt2; long codelength; codelength=(long)strlen(code); for (cnt2=cnt+1; cnt2<codelength; cnt2++) code[cnt2-1]=code[cnt2]; code[codelength-1]=EOS; } static void clean_comments(SOAP_codex_t *codex, char *code){ long cnt; long parentheses,anchorL,anchorR; bool INSTRING; char *lastnewline; cnt=0; parentheses=0; anchorL=0; INSTRING=FALSE; while (code[cnt]!=EOS) { if (code[cnt]=='"') INSTRING=!INSTRING; if (code[cnt]=='{' && !INSTRING) { parentheses++; if (parentheses==1) anchorL=cnt; } if (code[cnt]=='}' && !INSTRING) { parentheses--; if (parentheses==0) { anchorR=cnt; SOAP_strcut(anchorL,anchorR,code); cnt=cnt-(anchorR-anchorL+1); } if (parentheses==-1){ code[cnt]=EOS; lastnewline=code; while( (code = strstr(code,"__newline"))){ lastnewline = code++; } if (sscanf(lastnewline,"__newline(%ld)",&(codex->linenum))!=1) codex->linenum=-1; 
SOAP_fatal_error(codex,"Comment closed but not opened."); } } cnt++; } if (code[cnt]==EOS && INSTRING) { lastnewline=code; while( (code = strstr(code,"__newline"))){ lastnewline = code++; } if (sscanf(lastnewline,"__newline(%ld)",&(codex->linenum))!=1) codex->linenum=-1; SOAP_fatal_error(codex,"String not closed properly."); } if (code[cnt]==EOS && parentheses!=0) { lastnewline=code; while( (code = strstr(code,"__newline"))){ lastnewline = code++; } if (sscanf(lastnewline,"__newline(%ld)",&(codex->linenum))!=1) codex->linenum=-1; SOAP_fatal_error(codex,"Comment not closed properly."); } } /* only insert the __newline(); action in front of an action*/ void SOAP_insert_line_numbers_in_code_backward(char **code, long linenum_start){ long cnt,linenum,linenum2,cnt2,parentheses,commentbrackets; char *newlinestr; bool INSTRING,INSTRING2,CONTINUE; newlinestr=(char *)malloc(sizeof(char)*100); cnt=0; linenum=linenum_start; sprintf(newlinestr,"__newline(%ld);",linenum); SOAP_strins(newlinestr, code, cnt); cnt=cnt+strlen(newlinestr); INSTRING=FALSE; commentbrackets=0; CONTINUE=TRUE; do { if ((*code)[cnt]=='"') INSTRING=!INSTRING; if ((*code)[cnt]=='{') commentbrackets++; if ((*code)[cnt]=='}') commentbrackets--; if ((*code)[cnt]=='\n') linenum++; if ((*code)[cnt]==';' && !INSTRING && commentbrackets==0) { /* here, go back to previous ; or , or ( making sure parentheses is zero and not in a string */ cnt2=cnt; linenum2=linenum; parentheses=0; INSTRING2=FALSE; do { cnt2--; if ((*code)[cnt2]=='\n') linenum2--; if ((*code)[cnt2+1]=='(' && !INSTRING2 && commentbrackets==0) parentheses--; if ((*code)[cnt2+1]==')' && !INSTRING2 && commentbrackets==0) parentheses++; if ((*code)[cnt2+1]=='"') INSTRING2=!INSTRING2; if ((*code)[cnt2+1]=='{') commentbrackets--; if ((*code)[cnt2+1]=='}') commentbrackets++; if (cnt2<0) { CONTINUE=FALSE; /* fprintf(stderr,"\n\nproblem inserting line numbers aroung line %ld\n\n",linenum); exit(EXIT_FAILURE); */ } } while (CONTINUE && (((*code)[cnt2]!='(' && 
(*code)[cnt2]!=';' && (*code)[cnt2]!=',' && cnt2!=0) || INSTRING2 || parentheses!=0 || commentbrackets!=0)); /* then, do this */ if (CONTINUE){ do { cnt2++; if ((*code)[cnt2]=='\n') linenum2++; if ((*code)[cnt2]=='{') commentbrackets++; if ((*code)[cnt2-1]=='}') commentbrackets--; if ((*code)[cnt2]==EOS) { CONTINUE=FALSE; /* fprintf(stderr,"\n\nproblem inserting line numbers aroung line %ld\n\n",linenum); exit(EXIT_FAILURE); */ } } while(CONTINUE && ((*code)[cnt2]==' ' || (*code)[cnt2]=='\n' || (*code)[cnt2]=='\t' || (*code)[cnt2]==13 || commentbrackets!=0)); if (CONTINUE){ sprintf(newlinestr,"__newline(%ld);",linenum2); SOAP_strins(newlinestr, code, cnt2); cnt=cnt+strlen(newlinestr); commentbrackets=0; } } } cnt++; } while((*code)[cnt]!=EOS && CONTINUE); } /* insert the __newline(); action at all newlines */ void SOAP_insert_line_numbers_in_code(char **code, long linenum_start){ long cnt,linenum,linenumwritten,commentbrackets; char *newlinestr; bool INSTRING; newlinestr=(char *)malloc(sizeof(char)*1000); linenum=linenum_start; sprintf(newlinestr,"__newline(%ld);",linenum); SOAP_strins(newlinestr, code, 0); //SOAP_insert_line_numbers_in_code_backward(code, linenum_start); /* add a __newline() command after each ';' followed by spaces, tabs or new lines */ cnt=0; INSTRING=FALSE; commentbrackets=0; linenumwritten=-1; do { if ((*code)[cnt]=='"') INSTRING=!INSTRING; if ((*code)[cnt]=='{') commentbrackets++; if ((*code)[cnt]=='}') commentbrackets--; if ((*code)[cnt]=='\n') linenum++; if ((*code)[cnt]==';' && !INSTRING && commentbrackets==0) { while ((*code)[cnt+1]=='\n' || (*code)[cnt+1]==' ' || (*code)[cnt+1]=='\t' || (*code)[cnt+1]=='{'){ if ((*code)[cnt+1]=='{') { commentbrackets++; while (commentbrackets>0 && (*code)[cnt+2]!=EOS) { cnt++; if ((*code)[cnt+1]=='{') commentbrackets++; if ((*code)[cnt+1]=='\n') linenum++; if ((*code)[cnt+1]=='}') commentbrackets--; /* if ((*code)[cnt+1]==EOS) { fprintf(stderr,"\n\nComment not closed properly. 
SOAP fatal error in the vicinity of line %ld.\n\nExiting.\n\n",linenum); exit(EXIT_FAILURE); }*/ } // at this point, (*code)[cnt+1]='}' } if ((*code)[cnt+1]=='\n') { linenum++; sprintf(newlinestr,"__newline(%ld);",linenum); linenumwritten=linenum; if ((*code)[cnt+2]!=EOS) { SOAP_strins(newlinestr, code, cnt+2); cnt+=strlen(newlinestr); } } cnt++; } if (linenum!=linenumwritten) { sprintf(newlinestr,"__newline(%ld);",linenum); if ((*code)[cnt+1]!=EOS) SOAP_strins(newlinestr, code, cnt+1); cnt=cnt+strlen(newlinestr); } } cnt++; } while((*code)[cnt]!=EOS); free(newlinestr); //printf("%s",*code); } static void clean_code(SOAP_codex_t *codex, char *code){ long cnt; bool WINDOWS,INSTRING; clean_comments(codex,code); cnt=0; WINDOWS=FALSE; INSTRING=FALSE; while (code[cnt]!=EOS) { if (code[cnt]=='"') INSTRING=!INSTRING; if (!INSTRING) { if (code[cnt]=='=' && code[cnt+1]=='=') { code[cnt]=EQ; code[cnt+1]=' '; } if (code[cnt]=='!' && code[cnt+1]=='=') { code[cnt]=NEQ; code[cnt+1]=' '; } if (code[cnt]=='>' && code[cnt+1]=='=') { code[cnt]=GEQ; code[cnt+1]=' '; } if (code[cnt]=='<' && code[cnt+1]=='=') { code[cnt]=LEQ; code[cnt+1]=' '; } if (code[cnt]=='&' && code[cnt+1]=='&') { code[cnt]=AND; code[cnt+1]=' '; } if (code[cnt]=='|' && code[cnt+1]=='|') { code[cnt]=OR; code[cnt+1]=' '; } if (code[cnt]=='<') code[cnt]=LT; if (code[cnt]=='>') code[cnt]=GT; if (code[cnt]=='!') code[cnt]=NOT; } if (code[cnt]==13) WINDOWS=TRUE; if (( code[cnt]==' ' || code[cnt]=='\n' || code[cnt]=='\t' || code[cnt]==13 ) && (!INSTRING) ) delete_character(cnt,code); else cnt++; } if (WINDOWS) fprintf(stdout,"Your code contains some DOS(TM) end-of-line characters (#13). 
\n"); if (INSTRING) fprintf(stdout,"String not closed properly.\n"); } /* update the variable named *name to the value specified in *argum */ static void update_var(char **name, char **argum, SOAP_codex_t *codex){ long cnt; bool FOUND; substitute_array_elements(name,codex); SOAP_substitute_argum(argum,0,codex); cnt=0; FOUND=FALSE; if ((codex->vars)[0].name!=NULL) { do { if (strcmp(*name,(codex->vars)[cnt].name)==0) { FOUND=TRUE; (codex->vars)[cnt].value=(char *)realloc((codex->vars)[cnt].value, ((long)strlen(*argum)+3)*sizeof(char)); strcpy((codex->vars)[cnt].value,*argum); } cnt++; } while ((codex->vars)[cnt].name!=NULL); } if (!FOUND) { codex->vars=(SOAP_vars_t *)realloc(codex->vars,(cnt+5)*sizeof(SOAP_vars_t)); (codex->vars)[cnt].name=(char *)malloc(((long)strlen(*name)+3)*sizeof(char)); (codex->vars)[cnt].value=(char *)malloc(((long)strlen(*argum)+3)*sizeof(char)); strcpy((codex->vars)[cnt].name,*name); strcpy((codex->vars)[cnt].value,*argum); (codex->vars)[cnt+1].name=NULL; } } /* the builtin actions */ static void BA_write(char **argum, SOAP_codex_t *codex, bool NEWLINE){ if (codex->SCREENOUTPUT) { SOAP_substitute_all_argums(argum,codex); fprintf(stdout,"%s",*argum); if (NEWLINE) fprintf(stdout,"\n"); fflush(stdout); } } static void BA_printf(char **argum, SOAP_codex_t *codex){ long cnt,numargum,cnt2; char **argv; numargum=SOAP_number_argums(*argum); if (numargum<1) SOAP_fatal_error(codex,"Number of arguments given to printf must be at least 1."); assert(numargum>0); for (cnt=0; cnt<numargum; cnt++) SOAP_substitute_argum(argum,cnt,codex); argv=(char **)malloc(numargum*sizeof(char *)); for (cnt=0; cnt<numargum; cnt++){ argv[cnt]=(char *)malloc(sizeof(char)); SOAP_get_argum_straight(codex,&(argv[cnt]), *argum, cnt); } for (cnt=0; cnt<numargum; cnt++){ cnt2=0; do { if (argv[cnt][cnt2]=='"') { SOAP_strcut(cnt2,cnt2,argv[cnt]); cnt2--; } cnt2++; } while (argv[cnt][cnt2]!=EOS); } /* send everything to printf, to printf to stdout */ if (codex->SCREENOUTPUT) 
SOAP_printf((int)numargum,argv,stdout); /* free pointers */ for (cnt=0; cnt<numargum; cnt++) free(argv[cnt]); free(argv); } static void BA_fprintf(char **argum, SOAP_codex_t *codex){ long numargum,cnt,cnt2; FILE *stream; char **argv; char *filename; numargum=SOAP_number_argums(*argum); if (numargum<2) SOAP_fatal_error(codex,"Number of arguments given to fprintf must be at least 2: the filename and the string to print."); assert(numargum>1); for (cnt=0; cnt<numargum; cnt++) SOAP_substitute_argum(argum,cnt,codex); argv=(char **)malloc(numargum*sizeof(char *)); for (cnt=1; cnt<numargum; cnt++){ argv[cnt-1]=(char *)malloc(sizeof(char)); SOAP_get_argum_straight(codex,&(argv[cnt-1]), *argum, cnt); } for (cnt=1; cnt<numargum; cnt++){ cnt2=0; do { if (argv[cnt-1][cnt2]=='"') { SOAP_strcut(cnt2,cnt2,argv[cnt-1]); cnt2--; } cnt2++; } while (argv[cnt-1][cnt2]!=EOS); } filename=(char *)malloc(sizeof(char)); SOAP_get_argum_string(codex, &filename, *argum, 0); if (codex->FILEOUTPUT) { /* here, append to the file named in the first argument */ stream=fopen(filename,"a"); /* send everything to printf*/ SOAP_printf((int)numargum-1,argv,stream); fclose(stream); } /* free pointers */ for (cnt=1; cnt<numargum; cnt++) free(argv[cnt-1]); free(filename); free(argv); } static void BA_for(char **argum, SOAP_codex_t *codex){ char *cntstr,*loopcode,*cntstr2; long cnts,cnte,cnt; if (SOAP_number_argums(*argum)!=4) SOAP_fatal_error(codex,"the for() command needs 4 arguments: " "the first argument is the counter variable name; " "the second argument is the start of the counting (integer); " "the third argument is the end of the counting (integer); " "the fourth argument is the code to be executed at every count."); cntstr=(char *)malloc(sizeof(char)); cntstr2=(char *)malloc(sizeof(char)); loopcode=(char *)malloc(sizeof(char)); SOAP_substitute_argum(argum,1,codex); SOAP_substitute_argum(argum,2,codex); cnts=SOAP_get_argum_long(codex,*argum,1); cnte=SOAP_get_argum_long(codex,*argum,2); 
SOAP_get_argum_straight(codex,&cntstr,*argum,0); SOAP_get_argum_straight(codex,&loopcode, *argum, 3); if (cnts<=cnte) { for (cnt=cnts; cnt<=cnte; cnt++){ /* change value of cntstr to cntstr2 in variables */ cntstr2=(char *)realloc(cntstr2,maxnumlen*sizeof(char)); sprintf(cntstr2,"%ld",cnt); update_var(&cntstr, &cntstr2, codex); SOAP_process_code(loopcode, codex, SOAP_VARS_KEEP_ALL); } } else { for (cnt=cnts; cnt>=cnte; cnt--){ /* change value of cntstr to cntstr2 in variables */ cntstr2=(char *)realloc(cntstr2,maxnumlen*sizeof(char)); sprintf(cntstr2,"%ld",cnt); update_var(&cntstr, &cntstr2, codex); SOAP_process_code(loopcode, codex, SOAP_VARS_KEEP_ALL); } } free(cntstr); free(cntstr2); free(loopcode); } static void BA_for_parallel(char **argum, SOAP_codex_t *codex){ char *cntstr,*loopcode,*cntstr2; long cnts,cnte,cnt,cntvar,numvars,cnttmp; SOAP_codex_t *codexcopy; if (SOAP_number_argums(*argum)!=4) SOAP_fatal_error(codex,"the for_parallel() command needs 4 arguments: " "the first argument is the counter variable name; " "the second argument is the start of the counting (integer); " "the third argument is the end of the counting (integer); " "the fourth argument is the code to be executed at every count."); SOAP_substitute_argum(argum,1,codex); SOAP_substitute_argum(argum,2,codex); cnts=SOAP_get_argum_long(codex,*argum,1); cnte=SOAP_get_argum_long(codex,*argum,2); if (cnts>cnte) { cnttmp=cnte; cnte=cnts; cnts=cnttmp; } codexcopy=(SOAP_codex_t *)malloc((cnte-cnts+2)*sizeof(SOAP_codex_t)); #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt,cntstr2,cntstr,loopcode) schedule(dynamic) #endif for (cnt=cnts; cnt<=cnte; cnt++){ SOAP_copy_codex(codex, &(codexcopy[cnt-cnts])); (codexcopy[cnt-cnts]).vars=NULL; SOAP_copy_all_vars(codex->vars, &((codexcopy[cnt-cnts]).vars)); /* change value of cntstr to cntstr2 in variables */ loopcode=(char *)malloc(sizeof(char)); cntstr=(char *)malloc(sizeof(char)); SOAP_get_argum_straight(&(codexcopy[cnt-cnts]),&cntstr,*argum,0); 
SOAP_get_argum_straight(&(codexcopy[cnt-cnts]),&loopcode, *argum, 3); cntstr2=(char *)malloc(maxnumlen*sizeof(char)); sprintf(cntstr2,"%ld",cnt); update_var(&cntstr, &cntstr2, &(codexcopy[cnt-cnts])); SOAP_process_code(loopcode, &(codexcopy[cnt-cnts]), SOAP_VARS_KEEP_ALL); free(cntstr2); free(cntstr); free(loopcode); } for (cnt=cnts; cnt<=cnte; cnt++){ SOAP_count_all_vars(&(codexcopy[cnt-cnts]), &numvars); for (cntvar=0; cntvar<numvars; cntvar++){ SOAP_add_to_vars(codex, (codexcopy[cnt-cnts]).vars[cntvar].name, (codexcopy[cnt-cnts]).vars[cntvar].value); } SOAP_free_all_vars(((codexcopy[cnt-cnts]).vars)); SOAP_free_codex(&(codexcopy[cnt-cnts])); } } static void BA_if(char **argum, SOAP_codex_t *codex){ char *ifcode; ifcode=(char *)malloc(sizeof(char)); SOAP_substitute_argum(argum,0,codex); if (!(SOAP_number_argums(*argum)==2 || SOAP_number_argums(*argum)==3)) SOAP_fatal_error(codex,"Not the right number of arguments in if() command; " "the first argument is the condition; " "the second argument is the code to execute if the condition is true; " "the third argument (not required) is the code to execute if the condition if false."); if (is_logical(codex,longfromdouble(SOAP_get_argum_double(codex,*argum,0)))) { SOAP_get_argum_straight(codex,&ifcode, *argum, 1); SOAP_process_code(ifcode, codex, SOAP_VARS_KEEP_ALL); } else { if (SOAP_number_argums(*argum)==3){ SOAP_get_argum_straight(codex,&ifcode, *argum, 2); SOAP_process_code(ifcode, codex, SOAP_VARS_KEEP_ALL); } } free(ifcode); } static void BA_include(char **argum, SOAP_codex_t *codex){ char *includedcode; char *filename; char *message; char *filename_mem; filename_mem=(char *)malloc(sizeof(char)*(2+strlen(codex->filename))); strcpy(filename_mem,codex->filename); if (SOAP_number_argums(*argum)!=1) SOAP_fatal_error(codex,"Not the right number of arguments in include() command; " "the first and only argument is the name of the file to be included."); SOAP_substitute_argum(argum,0,codex); filename=(char 
*)malloc(sizeof(char)); includedcode=(char *)malloc(sizeof(char)); SOAP_get_argum_string(codex, &filename, *argum, 0); SOAP_store_file_as_string(filename, &includedcode); SOAP_insert_line_numbers_in_code(&includedcode, 1); message=(char *)malloc((100+strlen(filename))*sizeof(char)); sprintf(message,"%s\n included on line %ld of file ",filename,codex->linenum); SOAP_strins(message,&codex->filename,0); SOAP_process_code(includedcode, codex, SOAP_VARS_KEEP_ALL); strcpy(codex->filename,filename_mem); free(filename_mem); free(includedcode); free(filename); free(message); } static void BA_while(char **argum, SOAP_codex_t *codex){ char *loopcode,*condition; bool CONTINUE; condition=(char *)malloc(sizeof(char)); loopcode=(char *)malloc(sizeof(char)); SOAP_get_argum_straight(codex,&loopcode, *argum, 1); CONTINUE=TRUE; do { SOAP_get_argum_straight(codex,&condition,*argum,0); SOAP_substitute_argum(&condition,0,codex); if (!is_logical(codex,longfromdouble(SOAP_get_argum_double(codex,condition,0)))) CONTINUE=FALSE; if (CONTINUE) SOAP_process_code(loopcode, codex, SOAP_VARS_KEEP_ALL); } while (CONTINUE); free(condition); free(loopcode); } static void BA_exit(char **argum, SOAP_codex_t *codex){ long ret; SOAP_substitute_argum(argum,0,codex); ret=longfromdouble(SOAP_get_argum_double(codex,*argum,0)); #ifdef DISTMPI MPI_Finalize ( ); #endif exit(ret); } static void BA_system(char **argum, SOAP_codex_t *codex){ char *expr; expr=(char *)malloc(sizeof(char)); SOAP_substitute_argum(argum,0,codex); SOAP_get_argum_string(codex, &expr, *argum, 0); if (codex->SYSTEMCALL) { if (system(expr)==-1) fprintf(stdout,"Problem executing system command in BA_system()"); } } static void BA_newline(char **argum, SOAP_codex_t *codex){ SOAP_substitute_argum(argum,0,codex); codex->linenum=SOAP_get_argum_long(codex,*argum,0); } static void builtin_actions(char *action, char **argum, SOAP_codex_t *codex){ if (strcmp(action,"write")==0) { BA_write(argum,codex,FALSE); codex->ACTIONPROCESSED=TRUE; } if 
(strcmp(action,"writeln")==0) { BA_write(argum,codex,TRUE); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"printf")==0) { BA_printf(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"fprintf")==0) { BA_fprintf(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"for")==0) { BA_for(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"for_parallel")==0) { BA_for_parallel(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"if")==0) { BA_if(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"include")==0) { BA_include(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"while")==0) { BA_while(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"exit")==0) { BA_exit(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"system")==0) { BA_system(argum,codex); codex->ACTIONPROCESSED=TRUE; } if (strcmp(action,"__newline")==0) { BA_newline(argum,codex); codex->ACTIONPROCESSED=TRUE; } } void SOAP_add_to_vars(SOAP_codex_t *codex, char *name, char *value){ long varnum; bool FOUNDMATCH; varnum=0; FOUNDMATCH=FALSE; while (codex->vars[varnum].name!=NULL) { if (strcmp(codex->vars[varnum].name,name)==0) { FOUNDMATCH=TRUE; codex->vars[varnum].value=(char *)realloc(codex->vars[varnum].value, ((long)strlen(value)+2)*sizeof(char)); strcpy(codex->vars[varnum].value,value); } varnum++; } if (!FOUNDMATCH) { codex->vars=(SOAP_vars_t *)realloc(codex->vars,(varnum+2)*sizeof(SOAP_vars_t)); codex->vars[varnum].name=(char *)malloc(((long)strlen(name)+2)*sizeof(char)); codex->vars[varnum].value=(char *)malloc(((long)strlen(value)+2)*sizeof(char)); strcpy(codex->vars[varnum].name,name); strcpy(codex->vars[varnum].value,value); codex->vars[varnum+1].name=NULL; } } void SOAP_add_int_to_vars(SOAP_codex_t *codex, char *name, int value){ char valuestr[100]; if (sprintf(valuestr,"%d",value)<0) SOAP_fatal_error(codex,"Problem converting within SOAP_add_int_to_vars(); name=%s value=%d 
valuestr=%s.",name,value,valuestr); SOAP_add_to_vars(codex,name,valuestr); } bool SOAP_is_var_in_codex(SOAP_codex_t *codex, char *name){ long varnum; bool FOUNDMATCH; varnum=0; FOUNDMATCH=FALSE; while (codex->vars[varnum].name!=NULL) { if (strcmp(codex->vars[varnum].name,name)==0) { FOUNDMATCH=TRUE; } varnum++; } return(FOUNDMATCH); } double SOAP_var_value(SOAP_codex_t *codex, char *name){ long varnum; bool FOUNDMATCH; double value; int eos=EOS; varnum=0; FOUNDMATCH=FALSE; while (codex->vars[varnum].name!=NULL) { if (strcmp(codex->vars[varnum].name,name)==0) { FOUNDMATCH=TRUE; if (sscanf(codex->vars[varnum].value,"%lg%n",&value,&eos)!=1 || (codex->vars[varnum].value)[eos]!=EOS) SOAP_fatal_error(codex,"Problem evaluating expression >%s<.",codex->vars[varnum].value); } varnum++; } if (!FOUNDMATCH) { SOAP_fatal_error(codex,"Can't find variable match for %s.",name); } return(value); } void SOAP_var_value_string(SOAP_codex_t *codex, char *name, char **value){ long varnum; bool FOUNDMATCH; varnum=0; FOUNDMATCH=FALSE; while (codex->vars[varnum].name!=NULL) { if (strcmp(codex->vars[varnum].name,name)==0) { FOUNDMATCH=TRUE; *value=(char *)realloc(*value,(strlen(codex->vars[varnum].value)+3)*sizeof(char)); strcpy(*value,codex->vars[varnum].value); } varnum++; } if (!FOUNDMATCH) { SOAP_fatal_error(codex,"Can't find variable match for %s.",name); } } void SOAP_copy_all_vars(SOAP_vars_t *vars1, SOAP_vars_t **vars2){ long varnum; varnum=0; *vars2=(SOAP_vars_t *)realloc(*vars2,2*sizeof(SOAP_vars_t)); (*vars2)[varnum].name=NULL; while (vars1[varnum].name!=NULL) { *vars2=(SOAP_vars_t *)realloc(*vars2,(varnum+2)*sizeof(SOAP_vars_t)); (*vars2)[varnum].name=(char *)malloc(((long)strlen(vars1[varnum].name)+2)*sizeof(char)); (*vars2)[varnum].value=(char *)malloc(((long)strlen(vars1[varnum].value)+2)*sizeof(char)); strcpy((*vars2)[varnum].name,vars1[varnum].name); strcpy((*vars2)[varnum].value,vars1[varnum].value); (*vars2)[varnum+1].name=NULL; varnum++; } } void 
SOAP_free_all_vars(SOAP_vars_t *vars){  /* note: the 'void' return type sits at the end of the previous source line */
  /* Free the name/value strings of every variable in the NULL-name-terminated
     list 'vars', then mark the list empty.  The list array itself is NOT
     freed here (SOAP_free_codex releases it separately). */
  long varnum;
  varnum=0;
  while (vars[varnum].name!=NULL) {
    free(vars[varnum].name);
    free(vars[varnum].value);
    varnum++;
  }
  /* an empty list is one whose first entry has a NULL name */
  vars[0].name=NULL;
}

/* Release the allocations that SOAP_copy_codex() duplicated for a codex
   copy: the filename string and, if present, the pending action string.
   The variable list is shared with the original (see SOAP_copy_codex below)
   and is deliberately left alone. */
void SOAP_free_codex_copy(SOAP_codex_t *codex){
  free(codex->filename);
  if (codex->action_being_processed!=NULL) free(codex->action_being_processed);
}

/* Fully tear down a codex set up with SOAP_init_codex(): free every
   variable's name and value, the variable array itself, the filename
   copy, and any pending action string. */
void SOAP_free_codex(SOAP_codex_t *codex){
  long varnum;
  varnum=0;
  while (codex->vars[varnum].name!=NULL) {
    free(codex->vars[varnum].name);
    free(codex->vars[varnum].value);
    varnum++;
  }
  codex->vars[0].name=NULL;
  free(codex->vars);
  free(codex->filename);
  if (codex->action_being_processed!=NULL) free(codex->action_being_processed);
}

/* Initialize *codex to its default state: an empty variable list, screen /
   file / system-call output enabled, VERBOSE / FUNCTION / ACTION hooks off,
   line counter at zero, and a private heap copy of 'filename'. */
void SOAP_init_codex(SOAP_codex_t *codex, const char *filename){
  codex->vars=(SOAP_vars_t *)malloc(sizeof(SOAP_vars_t));
  codex->vars[0].name=NULL;  /* NULL name terminates the (empty) variable list */
  codex->VERBOSE=FALSE;
  codex->FUNCTION=FALSE;
  codex->ACTION=FALSE;
  codex->SCREENOUTPUT=TRUE;
  codex->FILEOUTPUT=TRUE;
  codex->SYSTEMCALL=TRUE;
  codex->ACTIONPROCESSED=FALSE;
  codex->linenum=0;
  codex->filename=(char *)malloc((2+strlen(filename))*sizeof(char));
  strcpy(codex->filename,filename);
  codex->action_being_processed=NULL;
}

/* Shallow/deep mixed copy of a codex: flags, hook pointers and line number
   are copied by value; the variable list pointer is SHARED with the
   original (not duplicated); filename and the pending action string get
   private heap copies (released later by SOAP_free_codex_copy). */
void SOAP_copy_codex(SOAP_codex_t *orig, SOAP_codex_t *copy){
  copy->vars=orig->vars;  /* shared, not duplicated */
  copy->VERBOSE=orig->VERBOSE;
  copy->FUNCTION=orig->FUNCTION;
  copy->ACTION=orig->ACTION;
  copy->SCREENOUTPUT=orig->SCREENOUTPUT;
  copy->FILEOUTPUT=orig->FILEOUTPUT;
  copy->SYSTEMCALL=orig->SYSTEMCALL;
  copy->ACTIONPROCESSED=orig->ACTIONPROCESSED;
  copy->action=orig->action;
  copy->function=orig->function;
  copy->action_args=orig->action_args;
  copy->function_args=orig->function_args;
  copy->linenum=orig->linenum;
  copy->filename=(char *)malloc((strlen(orig->filename)+2)*sizeof(char));
  strcpy(copy->filename,orig->filename);
  if (orig->action_being_processed!=NULL) {
    copy->action_being_processed=(char *)malloc((strlen(orig->action_being_processed)+2)*sizeof(char));
    strcpy(copy->action_being_processed,orig->action_being_processed);
  } else {
    copy->action_being_processed=NULL;
  }
}
/* Count the variables stored in the codex by scanning for the NULL-name
   terminator; the count is returned through *numvars. */
void SOAP_count_all_vars(SOAP_codex_t *codex, long *numvars){
  long NULL_POS;
  NULL_POS=-1;
  do {
    NULL_POS++;
  } while ((codex->vars)[NULL_POS].name!=NULL);
  *numvars=NULL_POS;
}

/* Drop every variable added after the list held 'numvarsinit' entries:
   free the extra name/value strings and re-terminate the list at the
   original length.  This gives a processed code fragment local scope. */
void SOAP_clean_added_vars(SOAP_codex_t *codex, long numvarsinit){
  long numvars,cnt;
  SOAP_count_all_vars(codex, &numvars);
  for (cnt=numvarsinit; cnt<numvars; cnt++) {
    free((codex->vars)[cnt].name);
    free((codex->vars)[cnt].value);
  }
  (codex->vars)[numvarsinit].name=NULL;
}

/* Process a piece of code defined in *code with the actions and variables
   held by *codex.  Statements are ';'-separated and are either assignments
   ("name=expr;") or action calls ("name(args);"); assignments update the
   variable list, action calls are dispatched first to the user-supplied
   handler (codex->action) and then to the builtin actions.  SOAP_VARS
   selects whether variables added while processing are kept or cleaned up
   afterwards (SOAP_VARS_KEEP_ALL / SOAP_VARS_CLEAN_ADDED / SOAP_VARS_CLEAN_ALL). */
void SOAP_process_code(char *code, SOAP_codex_t *codex, int SOAP_VARS){
  long cnt,cnt2,codelength,numvarsinit,parentheses;
  bool ASSIGN,INSTRING;
  char *action,*argum;
  SOAP_count_all_vars(codex,&numvarsinit);  /* remember list length for the optional cleanup below */
  clean_code(codex,code);                   /* strip comments/whitespace, encode multi-char operators */
  /* ShowCode(code); */
  codelength=(long)strlen(code);
  if (codelength>0) {
    cnt=0;
    action=(char *)malloc(sizeof(char));
    argum=(char *)malloc(sizeof(char));
    do {
      /* get action string and argument string */
      cnt2=0;
      while (code[cnt]!='(' && code[cnt]!='=' && code[cnt]!=EOS && code[cnt]!=';' && code[cnt]!='"') {
        action=(char *)realloc(action,(cnt2+2)*sizeof(char));
        action[cnt2]=code[cnt];
        cnt++;
        cnt2++;
      }
      if (code[cnt]==EOS || code[cnt]==';' || code[cnt]=='"') {
        SOAP_fatal_error(codex,"Action name not followed by '(' or '='.");
      }
      /* '=' introduces an assignment, '(' introduces an action call */
      if (code[cnt]=='=') ASSIGN=TRUE; else ASSIGN=FALSE;
      action[cnt2]=EOS;
      cnt++;
      cnt2=0;
      if (ASSIGN) parentheses=0; else parentheses=1;  /* for a call, the '(' just consumed is already open */
      INSTRING=FALSE;
      /* copy everything up to the terminating ';' at parenthesis depth 0,
         ignoring ';' inside nested parentheses or string literals */
      while ((code[cnt]!=';' || parentheses>0 || INSTRING) && code[cnt]!=EOS) {
        argum=(char *)realloc(argum,(cnt2+2)*sizeof(char));
        argum[cnt2]=code[cnt];
        if (code[cnt]=='"') INSTRING=!INSTRING;
        if (code[cnt]=='(') parentheses++;
        if (code[cnt]==')') parentheses--;
        cnt++;
        cnt2++;
      }
      if (!ASSIGN){
        /* record the action name so error messages / handlers can see it */
        codex->ACTIONPROCESSED=FALSE;
        codex->action_being_processed=realloc(codex->action_being_processed,sizeof(char) * (2+strlen(action)));
        strcpy(codex->action_being_processed,action);
      }
      if (parentheses>0) {
        SOAP_fatal_error(codex,"Missing ')' .");
      }
      if (parentheses<0) {
        SOAP_fatal_error(codex,"Too many ')' .");
      }
      if (code[cnt]==EOS) {
        SOAP_fatal_error(codex,"Expecting ';' .");
      }
      cnt++;
      /* for an action call, drop the trailing ')' that was copied above */
      if (ASSIGN) argum[cnt2]=EOS; else argum[cnt2-1]=EOS;
      /* if (codex->VERBOSE) printf("action='%s' argum='%s'\n",action,argum); */
      if (ASSIGN) {
        substitute_functions(&argum, codex);
        update_var(&action,&argum,codex);
      } else {
        /* user-supplied action handler first, then the builtins; each sets
           ACTIONPROCESSED when it recognizes the action name */
        if (codex->ACTION) (codex->action)(action,&argum,codex);
        builtin_actions(action,&argum,codex);
        if (!codex->ACTIONPROCESSED && codex->SCREENOUTPUT) fprintf(stdout,"%s ignored..",action);
        free(codex->action_being_processed);
        codex->action_being_processed=NULL;
      }
    } while (cnt<codelength);
    free(action);
    free(argum);
  }
  if (SOAP_VARS==SOAP_VARS_CLEAN_ADDED) {
    SOAP_clean_added_vars(codex, numvarsinit);
  }
  if (SOAP_VARS==SOAP_VARS_CLEAN_ALL) {
    fprintf(stdout,"SOAP_VARS_CLEAN_ALL not yet implemented in soap.c.\n");
  }
}
GB_binop__eq_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__eq_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__eq_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint64) // A*D function (colscale): GB (_AxD__eq_uint64) // D*A function (rowscale): GB (_DxB__eq_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint64) // C=scalar+B GB (_bind1st__eq_uint64) // C=scalar+B' GB (_bind1st_tran__eq_uint64) // C=A+scalar GB (_bind2nd__eq_uint64) // C=A'+scalar GB (_bind2nd_tran__eq_uint64) // C type: bool // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT64 || GxB_NO_EQ_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_uint64) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_loop1.c
#include <stdio.h>
#include <omp.h>

/* Split eight loop iterations across a team of two OpenMP threads;
 * every iteration prints its index and the id of the thread running it. */
int main(void) {
    #pragma omp parallel for num_threads(2)
    for (int i = 0; i < 8; i++) {
        printf("[%d] Hello OpenMP (%d)\n", i, omp_get_thread_num());
    }
    return 0;
}
GB_binop__rminus_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint64) // A*D function (colscale): GB (_AxD__rminus_uint64) // D*A function (rowscale): GB (_DxB__rminus_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint64) // C=scalar+B GB (_bind1st__rminus_uint64) // C=scalar+B' GB (_bind1st_tran__rminus_uint64) // C=A+scalar GB (_bind2nd__rminus_uint64) // C=A'+scalar GB (_bind2nd_tran__rminus_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if 
the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT64 || GxB_NO_RMINUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, 
bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rminus_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_matrix_matrix.c
#include <omp.h> #include <stdio.h> #define R_1 300 #define R_2 300 #define C_1 300 #define C_2 300 int main () { int m = R_1, n = C_1, p = R_2, q = C_2, c, d, k; double first[R_1][C_1] = {0}; double second[R_2][C_2] = {0}; double multiply[R_1][C_2] = {0}; for ( c = 0 ; c < m ; c++ ) for ( d = 0 ; d < n ; d++ ) first[c][d] = 1; for ( c = 0 ; c < p ; c++ ) for ( d = 0 ; d < q ; d++ ) second[c][d] = 1 ; if (n != p ){ return -1; } double start = omp_get_wtime(); #pragma omp parallel shared(first,second,multiply) private(c,d,k) { #pragma omp for schedule(static) for ( c = 0 ; c < m ; c++ ) { for ( d = 0 ; d < q ; d++ ) { for ( k = 0 ; k < p ; k++ ){ //define atomic and wil be more slow #pragma omp atomic multiply[c][d] += first[c][k] * second[k][d]; } } } } double end = omp_get_wtime(); printf("start time = %f\n",start); printf("end time = %f\n",end); printf("diff time = %f\n",end - start); return 0; }
hill_climbing_engine.h
//===------------------------------------------------------------*- C++ -*-===// // // Ripples: A C++ Library for Influence Maximization // Marco Minutoli <marco.minutoli@pnnl.gov> // Pacific Northwest National Laboratory // //===----------------------------------------------------------------------===// // // Copyright (c) 2019, Battelle Memorial Institute // // Battelle Memorial Institute (hereinafter Battelle) hereby grants permission // to any person or entity lawfully obtaining a copy of this software and // associated documentation files (hereinafter “the Software”) to redistribute // and use the Software in source and binary forms, with or without // modification. Such person or entity may use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and may permit // others to do so, subject to the following conditions: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimers. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Other than as used herein, neither the name Battelle Memorial Institute or // Battelle may be used in any form whatsoever without the express written // consent of Battelle. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. 
IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // //===----------------------------------------------------------------------===// #ifndef RIPPLES_HILL_CLIMBING_ENGINE_H #define RIPPLES_HILL_CLIMBING_ENGINE_H #include <algorithm> #include <atomic> #include <cstdint> #include <memory> #include <vector> #include "omp.h" #include "spdlog/sinks/stdout_color_sinks.h" #include "spdlog/spdlog.h" #include "trng/uniform01_dist.hpp" #include "ripples/bitmask.h" #ifdef RIPPLES_ENABLE_CUDA #include "ripples/cuda/cuda_generate_rrr_sets.h" #include "ripples/cuda/cuda_graph.cuh" #include "ripples/cuda/cuda_hc_engine.h" #include "ripples/cuda/cuda_utils.h" #include "ripples/cuda/from_nvgraph/hc/bfs.hxx" #endif namespace ripples { //! Engine scheduling dynamically sampling tasks for the Hill Climbing. //! //! \tparam GraphTy The type of the input graph. //! \tparam ItrTy The type of the workload iterator. template <typename GraphTy, typename ItrTy> class HCWorker { public: using ex_time_ms = std::chrono::duration<double, std::milli>; //! Construct the Sampling worker. //! \param G The input Graph. HCWorker(const GraphTy &G) : G_(G) {} //! Destructor. 
virtual ~HCWorker() = default; virtual void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) = 0; protected: const GraphTy &G_; }; template <typename GraphTy, typename ItrTy, typename PRNG, typename diff_model_tag> class HCCPUSamplingWorker : public HCWorker<GraphTy, ItrTy> { using vertex_type = typename GraphTy::vertex_type; using HCWorker<GraphTy, ItrTy>::G_; public: using ex_time_ms = std::chrono::duration<double, std::milli>; HCCPUSamplingWorker(const GraphTy &G, const PRNG &rng) : HCWorker<GraphTy, ItrTy>(G), rng_(rng), UD_() {} void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) { size_t offset = 0; while ((offset = mpmc_head.fetch_add(batch_size_)) < std::distance(B, E)) { auto first = B; std::advance(first, offset); auto last = first; std::advance(last, batch_size_); if (last > E) last = E; auto start = std::chrono::high_resolution_clock::now(); batch(first, last); auto end = std::chrono::high_resolution_clock::now(); record.push_back(end - start); } } private: void batch(ItrTy B, ItrTy E) { for (; B != E; ++B) { size_t edge_number = 0; if (std::is_same<diff_model_tag, independent_cascade_tag>::value) { for (vertex_type v = 0; v < G_.num_nodes(); ++v) { for (auto &e : G_.neighbors(v)) { // (*B)[edge_number] = UD_(rng_) <= e.weight ? 1 : 0; if (UD_(rng_) <= e.weight) B->set(edge_number); ++edge_number; } } } else if (std::is_same<diff_model_tag, linear_threshold_tag>::value) { for (vertex_type v = 0; v < G_.num_nodes(); ++v) { double threshold = UD_(rng_); for (auto &e : G_.neighbors(v)) { threshold -= e.weight; if (threshold <= 0) B->set(edge_number); // (*B)[edge_number] = threshold <= 0 ? 
1 : 0; ++edge_number; } } } } } static constexpr size_t batch_size_ = 32; PRNG rng_; trng::uniform01_dist<float> UD_; }; template <typename GraphTy, typename ItrTy, typename PRNGTy, typename diff_model_tag> class HCGPUSamplingWorker : public HCWorker<GraphTy, ItrTy> { #ifdef RIPPLES_ENABLE_CUDA using HCWorker<GraphTy, ItrTy>::G_; public: using ex_time_ms = std::chrono::duration<double, std::milli>; struct config_t { static constexpr size_t block_size_ = 256; static constexpr size_t num_threads_ = 1 << 15; size_t max_blocks_{0}; config_t() : max_blocks_(num_threads_ / block_size_) {} size_t num_gpu_threads() const { return num_threads_; } }; HCGPUSamplingWorker(const GraphTy &G, PRNGTy &rng, cuda_ctx<GraphTy> *ctx) : HCWorker<GraphTy, ItrTy>(G), ctx_(ctx), conf_(), master_rng_(rng) { cuda_set_device(ctx_->gpu_id); cuda_stream_create(&cuda_stream_); cuda_malloc((void **)&d_trng_state_, conf_.num_gpu_threads() * sizeof(PRNGTy)); cuda_malloc((void **)&d_flags_, ((G.num_edges() / (8 * sizeof(int)) + 1) * sizeof(int) * batch_size_)); } ~HCGPUSamplingWorker() { cuda_set_device(ctx_->gpu_id); cuda_stream_destroy(cuda_stream_); cuda_free(d_trng_state_); cuda_free(d_flags_); } void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) { size_t offset = 0; while ((offset = mpmc_head.fetch_add(batch_size_)) < std::distance(B, E)) { auto first = B; std::advance(first, offset); auto last = first; std::advance(last, batch_size_); if (last > E) last = E; auto start = std::chrono::high_resolution_clock::now(); batch(first, last); auto end = std::chrono::high_resolution_clock::now(); record.push_back(end - start); } } void rng_setup() { cuda_set_device(ctx_->gpu_id); cuda_lt_rng_setup(d_trng_state_, master_rng_, conf_.num_gpu_threads(), 0, conf_.max_blocks_, conf_.block_size_); } private: void batch(ItrTy B, ItrTy E) { cuda_set_device(ctx_->gpu_id); if (std::is_same<diff_model_tag, independent_cascade_tag>::value) { 
cuda_generate_samples_ic(conf_.max_blocks_, conf_.block_size_, batch_size_, G_.num_edges(), d_trng_state_, ctx_, d_flags_, cuda_stream_); } else if (std::is_same<diff_model_tag, linear_threshold_tag>::value) { assert(false && "Not Yet Implemented"); } for (size_t i = 0; B < E; ++B, ++i) { cuda_d2h(B->data(), d_flags_ + i * (B->bytes() / sizeof(int)), B->bytes(), cuda_stream_); } cuda_sync(cuda_stream_); } static constexpr size_t batch_size_ = 32; cuda_ctx<GraphTy> *ctx_; config_t conf_; PRNGTy master_rng_; cudaStream_t cuda_stream_; trng::uniform01_dist<float> UD_; PRNGTy *d_trng_state_; int *d_flags_; #endif }; template <typename GraphTy, typename ItrTy, typename PRNGTy, typename diff_model_tag, typename CpuWorkerTy, typename GpuWorkerTy> class PhaseEngine { using vertex_type = typename GraphTy::vertex_type; using worker_type = HCWorker<GraphTy, ItrTy>; using cpu_worker_type = CpuWorkerTy; using gpu_worker_type = GpuWorkerTy; public: using ex_time_ms = std::chrono::duration<double, std::milli>; PhaseEngine(const GraphTy &G, PRNGTy &master_rng, size_t cpu_workers, size_t gpu_workers, std::string loggerName) : G_(G), logger_(spdlog::stdout_color_mt(loggerName)) { size_t num_threads = cpu_workers + gpu_workers; // Construct workers. 
logger_->debug("Number of Threads = {}", num_threads); workers_.resize(num_threads); cpu_workers_.resize(cpu_workers); #if RIPPLES_ENABLE_CUDA gpu_workers_.resize(gpu_workers); cuda_contexts_.resize(gpu_workers); #endif #pragma omp parallel { int rank = omp_get_thread_num(); if (rank < cpu_workers) { auto rng = master_rng; rng.split(num_threads, rank); auto w = new cpu_worker_type(G_, rng); workers_[rank] = w; cpu_workers_[rank] = w; logger_->debug("> mapping: omp {}\t->CPU", rank); } else { #if RIPPLES_ENABLE_CUDA size_t num_devices = cuda_num_devices(); size_t device_id = rank % num_devices; logger_->debug("> mapping: omp {}\t->GPU {}/{}", rank, device_id, num_devices); logger_->trace("Building Cuda Context"); cuda_contexts_[rank - cpu_workers] = cuda_make_ctx(G, device_id); auto rng = master_rng; rng.split(num_threads, rank); auto w = new gpu_worker_type(G_, rng, cuda_contexts_[rank - cpu_workers]); w->rng_setup(); workers_[rank] = w; gpu_workers_[rank - cpu_workers] = w; logger_->trace("Cuda Context Built!"); #endif } } } ~PhaseEngine() { // Free workers. 
for (auto &v : workers_) delete v; #if RIPPLES_ENABLE_CUDA for (auto ctx : cuda_contexts_) { cuda_set_device(ctx->gpu_id); cuda_destroy_ctx(ctx); delete ctx; } #endif } protected: const GraphTy &G_; std::shared_ptr<spdlog::logger> logger_; std::vector<cpu_worker_type *> cpu_workers_; #if RIPPLES_ENABLE_CUDA std::vector<gpu_worker_type *> gpu_workers_; std::vector<cuda_ctx<GraphTy> *> cuda_contexts_; #endif std::vector<worker_type *> workers_; std::atomic<size_t> mpmc_head_{0}; }; template <typename GraphTy, typename ItrTy, typename PRNGTy, typename diff_model_tag> class SamplingEngine : public PhaseEngine< GraphTy, ItrTy, PRNGTy, diff_model_tag, HCCPUSamplingWorker<GraphTy, ItrTy, PRNGTy, diff_model_tag>, HCGPUSamplingWorker<GraphTy, ItrTy, PRNGTy, diff_model_tag>> { using phase_engine = PhaseEngine<GraphTy, ItrTy, PRNGTy, diff_model_tag, HCCPUSamplingWorker<GraphTy, ItrTy, PRNGTy, diff_model_tag>, HCGPUSamplingWorker<GraphTy, ItrTy, PRNGTy, diff_model_tag>>; using ex_time_ms = std::chrono::duration<double, std::milli>; public: SamplingEngine(const GraphTy &G, PRNGTy &master_rng, size_t cpu_workers, size_t gpu_workers) : phase_engine(G, master_rng, cpu_workers, gpu_workers, "SamplingEngine") {} void exec(ItrTy B, ItrTy E, std::vector<std::vector<ex_time_ms>> &record) { record.resize(workers_.size()); mpmc_head_.store(0); logger_->trace("Start Sampling"); #pragma omp parallel { assert(workers_.size() == omp_get_num_threads()); size_t rank = omp_get_thread_num(); workers_[rank]->svc_loop(mpmc_head_, B, E, record[rank]); } logger_->trace("End Sampling"); } private: using phase_engine::logger_; using phase_engine::mpmc_head_; using phase_engine::workers_; }; namespace { template <typename GraphTy, typename GraphMaskTy, typename Itr> size_t BFS(GraphTy &G, GraphMaskTy &M, Itr b, Itr e, Bitmask<int> &visited) { using vertex_type = typename GraphTy::vertex_type; std::queue<vertex_type> queue; for (; b != e; ++b) { queue.push(*b); } while (!queue.empty()) { vertex_type u = 
queue.front(); queue.pop(); visited.set(u); size_t edge_number = std::distance(G.neighbors(0).begin(), G.neighbors(u).begin()); for (auto v : G.neighbors(u)) { if (M.get(edge_number) && !visited.get(v.vertex)) { queue.push(v.vertex); } ++edge_number; } } return visited.popcount(); } template <typename GraphTy, typename GraphMaskTy> size_t BFS(GraphTy &G, GraphMaskTy &M, typename GraphTy::vertex_type v, Bitmask<int> visited) { using vertex_type = typename GraphTy::vertex_type; std::queue<vertex_type> queue; queue.push(v); visited.set(v); while (!queue.empty()) { vertex_type u = queue.front(); queue.pop(); size_t edge_number = std::distance(G.neighbors(0).begin(), G.neighbors(u).begin()); for (auto v : G.neighbors(u)) { if (M.get(edge_number) && !visited.get(v.vertex)) { queue.push(v.vertex); visited.set(v.vertex); } ++edge_number; } } return visited.popcount(); } } // namespace template <typename GraphTy, typename ItrTy> class HCCPUCountingWorker : public HCWorker<GraphTy, ItrTy> { using vertex_type = typename GraphTy::vertex_type; using HCWorker<GraphTy, ItrTy>::G_; public: using ex_time_ms = std::chrono::duration<double, std::milli>; HCCPUCountingWorker(const GraphTy &G, std::vector<size_t> &count, const std::set<vertex_type> &S) : HCWorker<GraphTy, ItrTy>(G), count_(count), S_(S) {} void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) { size_t offset = 0; while ((offset = mpmc_head.fetch_add(batch_size_)) < std::distance(B, E)) { auto first = B; std::advance(first, offset); auto last = first; std::advance(last, batch_size_); if (last > E) last = E; auto start = std::chrono::high_resolution_clock::now(); batch(first, last); auto end = std::chrono::high_resolution_clock::now(); record.push_back(end - start); } } private: void batch(ItrTy B, ItrTy E) { for (auto itr = B; itr < E; ++itr) { Bitmask<int> visited(G_.num_nodes()); size_t base_count = BFS(G_, *itr, S_.begin(), S_.end(), visited); for (vertex_type v = 0; v < 
G_.num_nodes(); ++v) { if (S_.find(v) != S_.end()) continue; size_t update_count = base_count + 1; if (!visited.get(v)) { update_count = BFS(G_, *itr, v, visited); } #pragma omp atomic count_[v] += update_count; } } } static constexpr size_t batch_size_ = 2; std::vector<size_t> &count_; const std::set<vertex_type> &S_; }; template <typename GraphTy, typename ItrTy> class HCGPUCountingWorker : public HCWorker<GraphTy, ItrTy> { #ifdef RIPPLES_ENABLE_CUDA using vertex_type = typename GraphTy::vertex_type; using d_vertex_type = typename cuda_device_graph<GraphTy>::vertex_t; using bfs_solver_t = nvgraph::Bfs<int>; using HCWorker<GraphTy, ItrTy>::G_; public: using ex_time_ms = std::chrono::duration<double, std::milli>; struct config_t { config_t(size_t num_workers) : block_size_(bfs_solver_t::traverse_block_size()), max_blocks_(num_workers ? cuda_max_blocks() / num_workers : 0) { auto console = spdlog::get("console"); console->trace( "> [GPUWalkWorkerIC::config_t] " "max_blocks_={}\tblock_size_={}", max_blocks_, block_size_); } size_t num_gpu_threads() const { return max_blocks_ * block_size_; } const size_t max_blocks_; const size_t block_size_; }; HCGPUCountingWorker(const config_t &conf, const GraphTy &G, cuda_ctx<GraphTy> *ctx, std::vector<size_t> &count, const std::set<vertex_type> &S) : HCWorker<GraphTy, ItrTy>(G), conf_(conf), ctx_(ctx), count_(count), S_(S), edge_filter_(new d_vertex_type[G_.num_edges()]) { cuda_set_device(ctx_->gpu_id); cuda_stream_create(&cuda_stream_); // allocate host/device memory Bitmask<int> _(G_.num_edges()); cuda_malloc((void **)&d_edge_filter_, _.bytes()); // create the solver solver_ = new bfs_solver_t(this->G_.num_nodes(), this->G_.num_edges(), cuda_graph_index(ctx_), cuda_graph_edges(ctx_), cuda_graph_weights(ctx_), true, TRAVERSAL_DEFAULT_ALPHA, TRAVERSAL_DEFAULT_BETA, conf_.max_blocks_, cuda_stream_); solver_->configure(nullptr, nullptr, d_edge_filter_); visited_ = std::unique_ptr<int[]>(new int[solver_->bmap_size()]); 
cuda_sync(cuda_stream_); } ~HCGPUCountingWorker() { cuda_set_device(ctx_->gpu_id); delete solver_; cuda_stream_destroy(cuda_stream_); // free host/device memory cuda_free(d_edge_filter_); } void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy B, ItrTy E, std::vector<ex_time_ms> &record) { size_t offset = 0; while ((offset = mpmc_head.fetch_add(batch_size_)) < std::distance(B, E)) { auto first = B; std::advance(first, offset); auto last = first; std::advance(last, batch_size_); if (last > E) last = E; auto start = std::chrono::high_resolution_clock::now(); batch(first, last); auto end = std::chrono::high_resolution_clock::now(); record.push_back(end - start); } } private: void batch(ItrTy B, ItrTy E) { std::vector<d_vertex_type> seeds(S_.begin(), S_.end()); for (auto itr = B; itr < E; ++itr) { cuda_h2d(d_edge_filter_, itr->data(), itr->bytes(), cuda_stream_); d_vertex_type base_count; solver_->traverse(seeds.data(), seeds.size(), visited_.get(), &base_count); // cuda_d2h(predecessors_, d_predecessors_, // G_.num_nodes() * sizeof(d_vertex_type), cuda_stream_); cuda_sync(cuda_stream_); for (vertex_type v = 0; v < G_.num_nodes(); ++v) { if (S_.find(v) != S_.end()) continue; size_t update_count = base_count + 1; int m = 1 << (v % (8 * sizeof(int))); if ((visited_[v / (8 * sizeof(int))] && m) == 0) { d_vertex_type count; solver_->traverse(v, base_count, visited_.get(), &count); cuda_sync(cuda_stream_); update_count = count; } #pragma omp atomic count_[v] += update_count; } } } static constexpr size_t batch_size_ = 2; config_t conf_; cuda_ctx<GraphTy> *ctx_; cudaStream_t cuda_stream_; bfs_solver_t *solver_; std::unique_ptr<d_vertex_type[]> edge_filter_; std::unique_ptr<int[]> visited_; d_vertex_type *d_edge_filter_; std::vector<size_t> &count_; const std::set<vertex_type> &S_; #endif }; template <typename GraphTy, typename ItrTy> class SeedSelectionEngine { using vertex_type = typename GraphTy::vertex_type; using worker_type = HCWorker<GraphTy, ItrTy>; using 
cpu_worker_type = HCCPUCountingWorker<GraphTy, ItrTy>; using gpu_worker_type = HCGPUCountingWorker<GraphTy, ItrTy>; public: using ex_time_ms = std::chrono::duration<double, std::milli>; SeedSelectionEngine(const GraphTy &G, size_t cpu_workers, size_t gpu_workers) : G_(G), count_(G_.num_nodes()), S_(), logger_(spdlog::stdout_color_mt("SeedSelectionEngine")) { size_t num_threads = cpu_workers + gpu_workers; // Construct workers. logger_->debug("Number of Threads = {}", num_threads); workers_.resize(num_threads); cpu_workers_.resize(cpu_workers); #if RIPPLES_ENABLE_CUDA gpu_workers_.resize(gpu_workers); cuda_contexts_.resize(gpu_workers); #endif #pragma omp parallel { int rank = omp_get_thread_num(); if (rank < cpu_workers) { auto w = new cpu_worker_type(G_, count_, S_); workers_[rank] = w; cpu_workers_[rank] = w; logger_->debug("> mapping: omp {}\t->CPU", rank); } else { #if RIPPLES_ENABLE_CUDA size_t num_devices = cuda_num_devices(); size_t device_id = rank % num_devices; logger_->debug("> mapping: omp {}\t->GPU {}/{}", rank, device_id, num_devices); logger_->trace("Building Cuda Context"); cuda_contexts_[rank - cpu_workers] = cuda_make_ctx(G, device_id); typename gpu_worker_type::config_t gpu_conf(gpu_workers); auto w = new gpu_worker_type(gpu_conf, G_, cuda_contexts_.back(), count_, S_); workers_[rank] = w; gpu_workers_[rank - cpu_workers] = w; logger_->trace("Cuda Context Built!"); #endif } } } ~SeedSelectionEngine() { // Free workers. 
for (auto &v : workers_) delete v; #if RIPPLES_ENABLE_CUDA for (auto ctx : cuda_contexts_) { cuda_set_device(ctx->gpu_id); cuda_destroy_ctx(ctx); delete ctx; } #endif } std::vector<vertex_type> exec(ItrTy B, ItrTy E, size_t k, std::vector<std::vector<ex_time_ms>> &record) { logger_->trace("Start Seed Selection"); record.resize(workers_.size()); std::vector<vertex_type> result; result.reserve(k); for (size_t i = 0; i < k; ++i) { #pragma omp parallel for for (size_t j = 0; j < count_.size(); ++j) count_[j] = 0; mpmc_head_.store(0); #pragma omp parallel { assert(workers_.size() == omp_get_num_threads()); size_t rank = omp_get_thread_num(); workers_[rank]->svc_loop(mpmc_head_, B, E, record[rank]); } auto itr = std::max_element(count_.begin(), count_.end()); vertex_type v = std::distance(count_.begin(), itr); S_.insert(v); result.push_back(v); logger_->trace("Seed {} : {}[{}] = {}", i, v, G_.convertID(v), *itr); } logger_->trace("End Seed Selection"); return result; } private: const GraphTy &G_; std::vector<size_t> count_; std::set<vertex_type> S_; // size_t gpu_workers_; // size_t cpu_workers_; std::shared_ptr<spdlog::logger> logger_; std::vector<cpu_worker_type *> cpu_workers_; #if RIPPLES_ENABLE_CUDA std::vector<gpu_worker_type *> gpu_workers_; std::vector<cuda_ctx<GraphTy> *> cuda_contexts_; #endif std::vector<worker_type *> workers_; std::atomic<size_t> mpmc_head_{0}; }; } // namespace ripples #endif
eigrp_fmt_plug.c
/* * Cracker for EIGRP (Cisco's proprietary routing protocol) MD5 + HMAC-SHA-256 authentication. * http://tools.ietf.org/html/draft-savage-eigrp-00 * * This is dedicated to Darya. You inspire me. * * This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_eigrp; #elif FMT_REGISTERS_H john_register_one(&fmt_eigrp); #else #include <string.h> #ifdef _OPENMP #include <omp.h> // OMP_SCALE on Intel core i7 // 2048 - 12030k/11596k // 4096 - 12575k/13114k // 8192 - 13316k/13921k // 16k - 13547k/14458k // 32k - 16106k/14700k // 64k - 16106k/14700k // 64k - 16674k/14674k // 128k - 17795k/14663k --test=0 has a tiny delay, but not bad. #define OMP_SCALE 131072 #endif #include "arch.h" #include "md5.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "memdbg.h" #include "escrypt/sha256.h" #define FORMAT_LABEL "eigrp" #define FORMAT_NAME "EIGRP MD5 / HMAC-SHA-256 authentication" #define FORMAT_TAG "$eigrp$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 81 // IOU accepts larger strings but doesn't use them fully, passwords are zero padded to a minimum length of 16 (for MD5 hashes only)! 
#define BINARY_SIZE 16 // MD5 hash or first 16 bytes of HMAC-SHA-256 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(int) #define MAX_SALT_SIZE 1024 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define HEXCHARS "0123456789abcdef" static struct fmt_tests tests[] = { {"$eigrp$2$020500000000000000000000000000000000002a000200280002001000000001000000000000000000000000$0$x$1a42aaf8ebe2f766100ea1fa05a5fa55", "password12345"}, {"$eigrp$2$020500000000000000000000000000000000002a000200280002001000000001000000000000000000000000$0$x$f29e7d44351d37e6fc71e2aacca63d28", "1234567812345"}, {"$eigrp$2$020500000000000000000000000000000000002a000200280002001000000001000000000000000000000000$1$0001000c010001000000000f000400080500030000f5000c0000000400$560c87396267310978883da92c0cff90", "password12345"}, {"$eigrp$2$020500000000000000000000000000000000002a000200280002001000000001000000000000000000000000$0$x$61f237e29d28538a372f01121f2cd12f", "123456789012345678901234567890"}, {"$eigrp$2$0205000000000000000000000000000000000001000200280002001000000001000000000000000000000000$0$x$212acb1cb76b31a810a9752c5cf6f554", "ninja"}, // this one is for @digininja :-) {"$eigrp$3$020500000000000000000000000000000000000a00020038000300200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000c010001000000000f000400080f00020000f5000a000000020000$0$x$1$10.0.0.2$cff66484cea20c6f58f175f8c004fc6d73be72090e53429c2616309aca38d5f3", "password12345"}, // HMAC-SHA-256 hash {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *saved_len; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { int length; int algo_type; int have_extra_salt; int extra_salt_length; unsigned char salt[MAX_SALT_SIZE]; char ip[45 + 1]; int ip_length; MD5_CTX prep_salt; unsigned char extra_salt[MAX_SALT_SIZE]; } *cur_salt; static void init(struct fmt_main 
*self) { #ifdef _OPENMP int omp_t = omp_get_num_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *ptrkeep; int res; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) return 0; ptrkeep = strdup(ciphertext); p = &ptrkeep[TAG_LENGTH]; if ((p = strtok(p, "$")) == NULL) goto err; res = atoi(p); if (res != 2 && res != 3) // MD5 hashes + HMAC-SHA256 hashes goto err; if ((p = strtok(NULL, "$")) == NULL) // salt goto err; if (strlen(p) > MAX_SALT_SIZE*2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) goto err; res = atoi(p); if (p[1] || res > 1) goto err; if ((p = strtok(NULL, "$")) == NULL) // salt2 (or a junk field) goto err; if (res == 1) { // we only care about extra salt IF that number was a 1 if (strlen(p) > MAX_SALT_SIZE*2) goto err; if (!ishex(p)) goto err; } if ((p = strtok(NULL, "$")) == NULL) // binary hash (or IP) goto err; if (!strcmp(p, "1")) { // this was an IP if ((p = strtok(NULL, "$")) == NULL) // IP goto err; // not doing too much IP validation. Length will have to do. // 5 char ip 'could' be 127.1 I know of no short IP. 1.1.1.1 is longer. if (strlen(p) < 5 || strlen(p) > sizeof(cur_salt->ip)) goto err; if ((p = strtok(NULL, "$")) == NULL) // ok, now p is binary. 
goto err; } res = strlen(p); if (res != BINARY_SIZE * 2 && res != 32 * 2) goto err; if (!ishex(p)) goto err; MEM_FREE(ptrkeep); return 1; err: MEM_FREE(ptrkeep); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; int i, len; char *p, *q; memset(&cs, 0, SALT_SIZE); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; p = ciphertext; cs.algo_type = atoi(p); p = p + 2; // salt start q = strchr(p, '$'); len = (q - p) / 2; cs.length = len; for (i = 0; i < len; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; q = q + 1; cs.have_extra_salt = atoi(q); if (cs.have_extra_salt == 1) { p = q + 2; q = strchr(p, '$'); cs.extra_salt_length = (q - p) / 2; for (i = 0; i < cs.extra_salt_length; i++) cs.extra_salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; } else { /* skip over extra_salt */ p = q + 2; q = strchr(p, '$'); } /* dirty hack for HMAC-SHA-256 support */ if (*q == '$' && *(q+1) == '1' && *(q+2) == '$') { /* IP destination field */ p = q + 3; q = strchr(p, '$'); cs.ip_length = q - p; strncpy(cs.ip, p, cs.ip_length); } /* Better do this once than 10 million times per second */ if (cs.algo_type == 2) { MD5_Init(&cs.prep_salt); MD5_Update(&cs.prep_salt, cs.salt, cs.length); } return &cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 
0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static unsigned char zeropad[16] = {0}; static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { MD5_CTX ctx; if (cur_salt->algo_type == 2) { memcpy(&ctx, &cur_salt->prep_salt, sizeof(MD5_CTX)); MD5_Update(&ctx, saved_key[index], saved_len[index]); if (saved_len[index] < 16) { MD5_Update(&ctx, zeropad, 16 - saved_len[index]); } // do we have extra_salt? if (cur_salt->have_extra_salt) { MD5_Update(&ctx, cur_salt->extra_salt, cur_salt->extra_salt_length); } MD5_Final((unsigned char*)crypt_out[index], &ctx); } else { HMAC_SHA256_CTX hctx[1]; unsigned char output[32]; unsigned char buffer[1 + PLAINTEXT_LENGTH + 45 + 1] = { 0 }; // HMAC key ==> '\n' + password + IP address buffer[0] = '\n'; // WTF? 
memcpy(buffer + 1, saved_key[index], saved_len[index]); memcpy(buffer + 1 + saved_len[index], cur_salt->ip, cur_salt->ip_length); HMAC__SHA256_Init(hctx, buffer, 1 + saved_len[index] + cur_salt->ip_length); HMAC__SHA256_Update(hctx, cur_salt->salt, cur_salt->length); HMAC__SHA256_Final(output, hctx); memcpy((unsigned char*)crypt_out[index], output, BINARY_SIZE); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void eigrp_set_key(char *key, int index) { saved_len[index] = strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1); } static char *get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int get_cost(void *salt) { return (unsigned int)((struct custom_salt*)salt)->algo_type; } #endif struct fmt_main fmt_eigrp = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "algorithm [2:MD5 3:HMAC-SHA-256]", }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { get_cost, }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, eigrp_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif
atomic.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * OpenMP reduction demo: each thread sums its share of a[] into a private
 * partial sum, then folds it into the shared total with `#pragma omp atomic`.
 *
 * Usage: ./atomic <iterations>   (iterations is clamped to MAXN)
 *
 * Fixes over the original: `main` had no return type (implicit int is
 * invalid since C99), and a[] was declared as a VLA `a[n]` even though its
 * bound is the compile-time constant 20.
 */
#define MAXN 20 /* capacity of a[]; n is clamped to this */

int main(int argc, char **argv)
{
	int i, n = MAXN, a[MAXN], suma = 0, sumalocal;

	if (argc < 2) {
		fprintf(stderr, "\nFalta iteraciones\n");
		exit(-1);
	}
	n = atoi(argv[1]);
	if (n > MAXN)
		n = MAXN;
	for (i = 0; i < n; i++)
		a[i] = i;

#pragma omp parallel private(sumalocal)
	{
		sumalocal = 0;
#pragma omp for schedule(static)
		for (i = 0; i < n; i++) {
			sumalocal += a[i];
			printf("thread %d suma de a[%d]=%d sumalocal=%d\n",
			       omp_get_thread_num(), i, a[i], sumalocal);
		}
		/* one thread at a time folds its partial sum into the total */
#pragma omp atomic
		suma = suma + sumalocal;
	}
	printf("Fuera de 'parallel' suma=%d\n", suma);
	return 0;
}
rawKeccak_512_fmt_plug.c
/* Keccak-512 cracker patch for JtR. Hacked together during January of 2013
 * by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This file is part of John the Ripper password cracker,
 * Copyright (c) 2012 by Solar Designer
 * based on rawMD4_fmt.c code, with trivial changes by groszek.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawKeccak;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawKeccak);
#else

#include <string.h>
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "KeccakHash.h"

#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif

#include "memdbg.h"

#define FORMAT_LABEL        "Raw-Keccak"
#define FORMAT_NAME         ""
#define FORMAT_TAG          "$keccak$"
#define FORMAT_TAG_LEN      (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME      "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT   ""
#define BENCHMARK_LENGTH    -1
#define PLAINTEXT_LENGTH    125
#define CIPHERTEXT_LENGTH   128   // 64-byte digest as hex
#define BINARY_SIZE         64
#define SALT_SIZE           0     // unsalted format
#define BINARY_ALIGN        4
#define SALT_ALIGN          1
#define MIN_KEYS_PER_CRYPT  1
#define MAX_KEYS_PER_CRYPT  1

static struct fmt_tests tests[] = {
	{"0eab42de4c3ceb9235fc91acffe746b29c29a8c366b7c60e4e67c466f36a4304c00fa9caf9d87976ba469bcbe06713b435f091ef2769fb160cdab33d3670680e", ""},
	{"$keccak$d135bb84d0439dbac432247ee573a23ea7d3c9deb2a968eb31d47c4fb45f1ef4422d6c531b5b9bd6f449ebcc449ea94d0a8f05f62130fda612da53c79659f609", "The quick brown fox jumps over the lazy dog"},
	{"$keccak$e4a7e8f5572f4853ef26a862f31687c249b1cd7922df2aac1f4348d8ceef944c74d1949e3465704a5f3f89fb53e0dcce3ea142c90af04c84cc7e548f144f8f0b", "abcd"},
	{"$keccak$b7c090825b238d33cff5c92075f4dd80ce1b36359ce399ce9fce2a2d91232d5a494a58c37f489c3c859b779b3740cd7791d7666793779ee5c67476d31f91c814", "UPPERCASE"},
	{"$keccak$40b787e94778266fb196a73b7a77edf9de2ef172451a2b87531324812250df8f26fcc11e69b35afddbe639956c96153e71363f97010bc99405dd2d77b8c41986", "123456789"},
	{NULL}
};

static int (*saved_len);
// the Keccak function can read up to the next even 8 byte offset;
// making the buffer larger avoids reading past the end of the buffer
static char (*saved_key)[(((PLAINTEXT_LENGTH+1)+7)/8)*8];
static uint32_t (*crypt_out)
    [(BINARY_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)];

// Allocate the per-candidate key/length/digest buffers, scaled by the
// number of OpenMP threads times OMP_SCALE.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
}

// A ciphertext is valid iff (after an optional "$keccak$" tag) it is
// exactly CIPHERTEXT_LENGTH hex digits.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, FORMAT_TAG_LEN))
		p += FORMAT_TAG_LEN;

	q = p;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q && q - p == CIPHERTEXT_LENGTH;
}

// Canonicalize: prepend the tag if missing and lower-case the hex digits
// (the format registers FMT_SPLIT_UNIFIES_CASE).
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[FORMAT_TAG_LEN + CIPHERTEXT_LENGTH + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		ciphertext += FORMAT_TAG_LEN;

	memcpy(out, FORMAT_TAG, FORMAT_TAG_LEN);
	memcpy(out + FORMAT_TAG_LEN, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(out + FORMAT_TAG_LEN);
	return out;
}

// Decode the hex ciphertext into BINARY_SIZE raw bytes.
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = ciphertext + FORMAT_TAG_LEN;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

// Partial-hash lookups over the first word of each computed digest.
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

// Store a candidate, truncated to PLAINTEXT_LENGTH; NUL termination is
// deferred to get_key() so set_key stays cheap.
static void set_key(char *key, int index)
{
	int len = strlen(key);

	saved_len[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_len[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
}

static char *get_key(int index)
{
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
}

// Hash every queued candidate with Keccak-512. The (576, 1024, 512, 0x01)
// arguments select rate 576 / capacity 1024 bits, a 512-bit digest, and
// the 0x01 delimited suffix, i.e. original Keccak padding rather than the
// SHA-3 (0x06) variant — presumably intentional for "Raw-Keccak"; confirm
// against KeccakHash.h.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		Keccak_HashInstance hash;

		Keccak_HashInitialize(&hash, 576, 1024, 512, 0x01);
		Keccak_HashUpdate(&hash, (unsigned char*)saved_key[index],
		                  saved_len[index] * 8);  // length in *bits*
		Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index]);
	}
	return count;
}

// Quick screen: compare only the first ARCH_SIZE bytes of each digest;
// cmp_one() re-checks the full BINARY_SIZE on a match.
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_rawKeccak = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"Keccak 512 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
nest.c
#include <stdio.h>
#include <omp.h>

/*
 * Nested-parallelism demo (the original was marked "DOESN'T WORK!!").
 *
 * The bug: a `#pragma omp for` was closely nested inside another
 * `#pragma omp for` bound to the same parallel region, which the OpenMP
 * specification forbids (a worksharing region may not be closely nested
 * inside another worksharing region). The inner loop now gets its own
 * parallel region, which omp_set_nested(1) allows to actually fork.
 */
int main(int argc, char *argv[])
{
	int nThreads = 4;
	omp_set_num_threads(nThreads);
	omp_set_nested(1); /* allow inner parallel regions to create threads */

	int n = 0;
	if (scanf("%d", &n) != 1) /* original ignored scanf's result */
		return 1;

#pragma omp parallel for
	for (int i = 0; i < n; i++) {
		/* legal nesting: a new parallel region, not a bare `for` */
#pragma omp parallel for
		for (int j = 0; j < n; j++) {
			printf("Hello from thread #%d iteration i#%d j#%d\n",
			       omp_get_thread_num(), i, j);
		}
	}
	return 0;
}
osc.c
// A Splitting Method for Optimal Control // by Brendan O'Donoghue, George Stathopoulos and Stephen Boyd // this file contains the code to perform a single cold-start #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <stdbool.h> #include "math.h" #include "cholesky.h" #include "osc.h" #define MAX_ITERS 3000 // maximum number of iterations OSC will perform #define EPS_ABS 0.001 // absolute tolerance (for testing convergence) #define EPS_REL 0.001 // relative tolerance (for testing convergence) // performs OSC algorithm and returns timing data Timings osc(prob_vars * vars, all_data * data, prox_data * p_data){ bool relax_on = fabs(data->alpha-1) < 1e-3 ? false:true; Timings tt={0}; struct timeval start,end,lin_sys_st,lin_sys_end, prox_st,prox_end; double x_t_old[data->n*(data->T+1)]; double u_t_old[data->m*(data->T+1)]; int i;double rn, dn; gettimeofday(&start, NULL); for (i=0;i<MAX_ITERS;i++){ //printf("iteration %i\n",i); memcpy(x_t_old,vars->x_t,sizeof(double)*data->n*(data->T+1)); memcpy(u_t_old,vars->u_t,sizeof(double)*data->m*(data->T+1)); gettimeofday(&lin_sys_st, NULL); solveLinSys(vars,data); gettimeofday(&lin_sys_end,NULL); tt.lin_sys_time += lin_sys_end.tv_sec*1e3 + lin_sys_end.tv_usec/1e3 - lin_sys_st.tv_sec*1e3 - lin_sys_st.tv_usec/1e3; if (relax_on) relax(vars,data); gettimeofday(&prox_st, NULL); prox(vars,data,p_data); gettimeofday(&prox_end,NULL); tt.prox_time += prox_end.tv_sec*1e3 + prox_end.tv_usec/1e3 - prox_st.tv_sec*1e3 - prox_st.tv_usec/1e3; updateDualVars(vars,data); //printAll(data, vars); if(testConvergence(data,vars,x_t_old,u_t_old,&rn,&dn)) break; } gettimeofday(&end,NULL); tt.lin_sys_time /= (i+1); tt.prox_time /= (i+1); tt.total_time = end.tv_sec*1e3 + end.tv_usec/1e3 - start.tv_sec*1e3 - start.tv_usec/1e3; tt.itns = i+1; return tt; } void printAll(all_data * data, prob_vars * vars){ printf("\n u is \n"); for(int i=0;i<data->m*(data->T+1);i++){ printf("%f\n",vars->u[i]); } printf("\n x is \n"); 
for(int i=0;i<data->n*(data->T+1);i++){ printf("%f\n",vars->x[i]); } printf("\n u_t is \n"); for(int i=0;i<data->m*(data->T+1);i++){ printf("%f\n",vars->u_t[i]); } printf("\n x_t is \n"); for(int i=0;i<data->n*(data->T+1);i++){ printf("%f\n",vars->x_t[i]); } printf("\n y is \n"); for(int i=0;i<data->m*(data->T+1);i++){ printf("%f\n",vars->y[i]); } printf("\n z is \n"); for(int i=0;i<data->n*(data->T+1);i++){ printf("%f\n",vars->z[i]); } } void freeVars(prob_vars * vars){ free(vars->x);free(vars->u);free(vars->x_t);free(vars->u_t);free(vars->z);free(vars->y); free(vars); } void freeData(all_data* data){ free(data->Lp);free(data->Lr);free(data->Lx);free(data->D);free(data->RHS);free(data->P);free(data->Pinv);free(data->x_init); free(data); } void relax(prob_vars * vars, all_data* data){ // #pragma omp parallel for for(int j=0;j<data->n*(data->T+1);j++){ vars->x[j] = data->alpha*vars->x[j]+(1-data->alpha)*vars->x_t[j]; } // #pragma omp parallel for for(int j=0;j<data->m*(data->T+1);j++){ vars->u[j] = data->alpha*vars->u[j]+(1-data->alpha)*vars->u_t[j]; } } double calcNormSquared(double * A,size_t len){ double norm2=0.0; // #pragma omp parallel for reduction(+: norm2) for(int i=0;i<len;i++){ norm2 += pow(A[i],2); } return norm2; } void scaleArray(double * a,double b,int len){ // #pragma omp parallel for for(int i=0;i<len;i++) a[i]*=b; } void addArray(double * a,double *b,int len){ // #pragma omp parallel for for(int i=0;i<len;i++) a[i]+=b[i]; } void subArray(double * a,double *b,int len){ // #pragma omp parallel for for(int i=0;i<len;i++) a[i]-=b[i]; } bool testConvergence(all_data * data,prob_vars * vars,double *x_t_old,double *u_t_old,double*rn,double*dn){ double u_tmp[data->m*(data->T+1)]; memcpy(u_tmp,vars->u_t,sizeof(double)*data->m*(data->T+1)); subArray(u_tmp,vars->u,data->m*(data->T+1)); double x_tmp[data->n*(data->T+1)]; memcpy(x_tmp,vars->x_t,sizeof(double)*data->n*(data->T+1)); subArray(x_tmp,vars->x,data->n*(data->T+1)); *rn = 
sqrt(calcNormSquared(u_tmp,data->m*(data->T+1))+calcNormSquared(x_tmp,data->n*(data->T+1))); double eps_primal = EPS_ABS*sqrt((data->T+1)*(data->n+data->m)) + EPS_REL*sqrt(fmaxl(calcNormSquared(vars->x,data->n*(data->T+1))+calcNormSquared(vars->u,data->m*(data->T+1)),calcNormSquared(vars->x_t,data->n*(data->T+1))+ calcNormSquared(vars->u_t,data->m*(data->T+1)))); if(*rn>eps_primal){ //printf("rn is %f\n",*rn); return false; } memcpy(u_tmp,vars->u_t,sizeof(double)*data->m*(data->T+1)); subArray(u_tmp,u_t_old,data->m*(data->T+1)); memcpy(x_tmp,vars->x_t,sizeof(double)*data->n*(data->T+1)); subArray(x_tmp,x_t_old,data->n*(data->T+1)); *dn = sqrt(calcNormSquared(u_tmp,data->m*(data->T+1))+calcNormSquared(x_tmp,data->n*(data->T+1))); (*dn)*=data->rho; double eps_dual = EPS_ABS*sqrt((data->T+1)*(data->n+data->m)) + EPS_REL*sqrt(calcNormSquared(vars->z,data->n*(data->T+1))+ calcNormSquared(vars->y,data->m*(data->T+1))); if(*dn>eps_dual){ //printf("dn is %f\n",*dn); return false; } return true; } void updateDualVars(prob_vars * vars,all_data *data){ addArray(vars->z,vars->x_t,data->n*(data->T+1)); subArray(vars->z,vars->x,data->n*(data->T+1)); addArray(vars->y,vars->u_t,data->m*(data->T+1)); subArray(vars->y,vars->u,data->m*(data->T+1)); } void solveLinSys(prob_vars * vars,all_data *data){ double rhs[data->nc]; memcpy(rhs,data->RHS,sizeof(double)*data->nc); // #pragma omp parallel for for(int i=0;i<data->T+1;i++){ for(int j=0;j<data->n;j++){ rhs[i*(data->n+data->m)+j]+=data->rho*(vars->x_t[i*data->n+j]+vars->z[i*data->n+j]); } for(int j=0;j<data->m;j++){ rhs[i*(data->n+data->m)+j+data->n]+=data->rho*(vars->u_t[i*data->m+j]+vars->y[i*data->m+j]); } } double *w = malloc(sizeof(double)*data->nc); choleskySolve(data->nc, w, rhs, data->Lp, data->Lr, data->Lx, data->D, data->P); // #pragma omp parallel for for(int i=0;i<data->T+1;i++){ for(int j=0;j<data->n;j++){ vars->x[i*(data->n)+j]=w[i*(data->n+data->m)+j]; } for(int j=0;j<data->m;j++){ 
vars->u[i*(data->m)+j]=w[i*(data->n+data->m)+data->n+j]; } } free(w); }
matrix.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M AAA TTTTT RRRR IIIII X X % % MM MM A A T R R I X X % % M M M AAAAA T RRRR I X % % M M A A T R R I X X % % M M A A T R R IIIII X X % % % % % % MagickCore Matrix Methods % % % % Software Design % % Cristy % % August 2007 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" /* Typedef declaration. 
*/ struct _MatrixInfo { CacheType type; size_t columns, rows, stride; MagickSizeType length; MagickBooleanType mapped, synchronize; char path[MagickPathExtent]; int file; void *elements; SemaphoreInfo *semaphore; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a t r i x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMatrixInfo() allocates the ImageInfo structure. % % The format of the AcquireMatrixInfo method is: % % MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows, % const size_t stride,ExceptionInfo *exception) % % A description of each parameter follows: % % o columns: the matrix columns. % % o rows: the matrix rows. % % o stride: the matrix stride. % % o exception: return any errors or warnings in this structure. % */ #if defined(SIGBUS) static void MatrixSignalHandler(int status) { ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache"); } #endif static inline MagickOffsetType WriteMatrixElements( const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file,offset,SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return((MagickOffsetType) -1); } #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) MAGICK_SSIZE_MAX)); #else count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PWRITE) UnlockSemaphoreInfo(matrix_info->semaphore); #endif 
return(i); } static MagickBooleanType SetMatrixExtent( MatrixInfo *magick_restrict matrix_info,MagickSizeType length) { MagickOffsetType count, extent, offset; if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) return(MagickTrue); extent=(MagickOffsetType) length-1; count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) ""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (matrix_info->synchronize != MagickFalse) (void) posix_fallocate(matrix_info->file,offset+1,extent-offset); #endif #if defined(SIGBUS) (void) signal(SIGBUS,MatrixSignalHandler); #endif return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue); } MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns, const size_t rows,const size_t stride,ExceptionInfo *exception) { char *synchronize; MagickBooleanType status; MatrixInfo *matrix_info; matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info)); if (matrix_info == (MatrixInfo *) NULL) return((MatrixInfo *) NULL); (void) memset(matrix_info,0,sizeof(*matrix_info)); matrix_info->signature=MagickCoreSignature; matrix_info->columns=columns; matrix_info->rows=rows; matrix_info->stride=stride; matrix_info->semaphore=AcquireSemaphoreInfo(); synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { matrix_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } matrix_info->length=(MagickSizeType) columns*rows*stride; if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=MemoryCache; status=AcquireMagickResource(AreaResource,matrix_info->length); if ((status != MagickFalse) && 
(matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length))) { status=AcquireMagickResource(MemoryResource,matrix_info->length); if (status != MagickFalse) { matrix_info->mapped=MagickFalse; matrix_info->elements=AcquireMagickMemory((size_t) matrix_info->length); if (matrix_info->elements == NULL) { matrix_info->mapped=MagickTrue; matrix_info->elements=MapBlob(-1,IOMode,0,(size_t) matrix_info->length); } if (matrix_info->elements == (unsigned short *) NULL) RelinquishMagickResource(MemoryResource,matrix_info->length); } } matrix_info->file=(-1); if (matrix_info->elements == (unsigned short *) NULL) { status=AcquireMagickResource(DiskResource,matrix_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=DiskCache; matrix_info->file=AcquireUniqueFileResource(matrix_info->path); if (matrix_info->file == -1) return(DestroyMatrixInfo(matrix_info)); status=AcquireMagickResource(MapResource,matrix_info->length); if (status != MagickFalse) { status=SetMatrixExtent(matrix_info,matrix_info->length); if (status != MagickFalse) matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0, (size_t) matrix_info->length); if (matrix_info->elements != NULL) matrix_info->type=MapCache; else RelinquishMagickResource(MapResource,matrix_info->length); } } return(matrix_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a g i c k M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMagickMatrix() allocates and returns a matrix in the form of an % array of pointers to an array of doubles, with all values pre-set to zero. 
% % This used to generate the two dimensional matrix, and vectors required % for the GaussJordanElimination() method below, solving some system of % simultanious equations. % % The format of the AcquireMagickMatrix method is: % % double **AcquireMagickMatrix(const size_t number_rows, % const size_t size) % % A description of each parameter follows: % % o number_rows: the number pointers for the array of pointers % (first dimension). % % o size: the size of the array of doubles each pointer points to % (second dimension). % */ MagickExport double **AcquireMagickMatrix(const size_t number_rows, const size_t size) { double **matrix; ssize_t i, j; matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix)); if (matrix == (double **) NULL) return((double **) NULL); for (i=0; i < (ssize_t) number_rows; i++) { matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[i])); if (matrix[i] == (double *) NULL) { for (j=0; j < i; j++) matrix[j]=(double *) RelinquishMagickMemory(matrix[j]); matrix=(double **) RelinquishMagickMemory(matrix); return((double **) NULL); } for (j=0; j < (ssize_t) size; j++) matrix[i][j]=0.0; } return(matrix); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y M a t r i x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyMatrixInfo() dereferences a matrix, deallocating memory associated % with the matrix. % % The format of the DestroyImage method is: % % MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. 
% */ MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); LockSemaphoreInfo(matrix_info->semaphore); switch (matrix_info->type) { case MemoryCache: { if (matrix_info->mapped == MagickFalse) matrix_info->elements=RelinquishMagickMemory(matrix_info->elements); else { (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length); matrix_info->elements=(unsigned short *) NULL; } RelinquishMagickResource(MemoryResource,matrix_info->length); break; } case MapCache: { (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length); matrix_info->elements=NULL; RelinquishMagickResource(MapResource,matrix_info->length); } case DiskCache: { if (matrix_info->file != -1) (void) close(matrix_info->file); (void) RelinquishUniqueFileResource(matrix_info->path); RelinquishMagickResource(DiskResource,matrix_info->length); break; } default: break; } UnlockSemaphoreInfo(matrix_info->semaphore); RelinquishSemaphoreInfo(&matrix_info->semaphore); return((MatrixInfo *) RelinquishMagickMemory(matrix_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G a u s s J o r d a n E l i m i n a t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GaussJordanElimination() returns a matrix in reduced row echelon form, % while simultaneously reducing and thus solving the augumented results % matrix. % % See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % % The format of the GaussJordanElimination method is: % % MagickBooleanType GaussJordanElimination(double **matrix, % double **vectors,const size_t rank,const size_t number_vectors) % % A description of each parameter follows: % % o matrix: the matrix to be reduced, as an 'array of row pointers'. % % o vectors: the additional matrix argumenting the matrix for row reduction. 
%      Producing an 'array of column vectors'.
%
%    o rank: The size of the matrix (both rows and columns).
%      Also represents the number of terms that need to be solved.
%
%    o number_vectors: Number of vector columns, augmenting the above matrix.
%      Usually 1, but can be more for more complex equation solving.
%
%  Note that the 'matrix' is given as an 'array of row pointers' of rank size.
%  That is, values can be assigned as matrix[row][column] where 'row' is
%  typically the equation, and 'column' is the term of the equation.
%  That is, the matrix is in the form of a 'row first array'.
%
%  However 'vectors' is an 'array of column pointers' which can have any
%  number of columns, with each column array the same 'rank' size as 'matrix'.
%
%  This allows for simpler handling of the results, especially if only one
%  column 'vector' is all that is required to produce the desired solution.
%
%  For example, the 'vectors' can consist of a pointer to a simple array of
%  doubles, when only one set of simultaneous equations is to be solved from
%  the given set of coefficient weighted terms.
%
%     double **matrix = AcquireMagickMatrix(8UL,8UL);
%     double coefficients[8];
%     ...
%     GaussJordanElimination(matrix, &coefficients, 8UL, 1UL);
%
%  However by specifying more 'columns' (as an 'array of vector columns'),
%  you can use this function to solve a set of 'separable' equations.
%
%  For example a distortion function where u = U(x,y) v = V(x,y)
%  And the functions U() and V() have separate coefficients, but are being
%  generated from a common x,y->u,v data set.
%
%  Another example is generation of a color gradient from a set of colors at
%  specific coordinates, such as a list x,y -> r,g,b,a.
%
%  You can also use the 'vectors' to generate an inverse of the given 'matrix'
%  though as a 'column first array' rather than a 'row first array'.
For % details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % */ MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix, double **vectors,const size_t rank,const size_t number_vectors) { #define GaussJordanSwap(x,y) \ { \ if ((x) != (y)) \ { \ (x)+=(y); \ (y)=(x)-(y); \ (x)=(x)-(y); \ } \ } double max, scale; ssize_t i, j, k; ssize_t column, *columns, *pivots, row, *rows; columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns)); rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows)); pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots)); if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) || (pivots == (ssize_t *) NULL)) { if (pivots != (ssize_t *) NULL) pivots=(ssize_t *) RelinquishMagickMemory(pivots); if (columns != (ssize_t *) NULL) columns=(ssize_t *) RelinquishMagickMemory(columns); if (rows != (ssize_t *) NULL) rows=(ssize_t *) RelinquishMagickMemory(rows); return(MagickFalse); } (void) memset(columns,0,rank*sizeof(*columns)); (void) memset(rows,0,rank*sizeof(*rows)); (void) memset(pivots,0,rank*sizeof(*pivots)); column=0; row=0; for (i=0; i < (ssize_t) rank; i++) { max=0.0; for (j=0; j < (ssize_t) rank; j++) if (pivots[j] != 1) { for (k=0; k < (ssize_t) rank; k++) if (pivots[k] != 0) { if (pivots[k] > 1) return(MagickFalse); } else if (fabs(matrix[j][k]) >= max) { max=fabs(matrix[j][k]); row=j; column=k; } } pivots[column]++; if (row != column) { for (k=0; k < (ssize_t) rank; k++) GaussJordanSwap(matrix[row][k],matrix[column][k]); for (k=0; k < (ssize_t) number_vectors; k++) GaussJordanSwap(vectors[k][row],vectors[k][column]); } rows[i]=row; columns[i]=column; if (matrix[column][column] == 0.0) return(MagickFalse); /* sigularity */ scale=PerceptibleReciprocal(matrix[column][column]); matrix[column][column]=1.0; for (j=0; j < (ssize_t) rank; j++) matrix[column][j]*=scale; for (j=0; j < (ssize_t) number_vectors; j++) vectors[j][column]*=scale; for (j=0; j < (ssize_t) rank; j++) if (j != column) { 
scale=matrix[j][column]; matrix[j][column]=0.0; for (k=0; k < (ssize_t) rank; k++) matrix[j][k]-=scale*matrix[column][k]; for (k=0; k < (ssize_t) number_vectors; k++) vectors[k][j]-=scale*vectors[k][column]; } } for (j=(ssize_t) rank-1; j >= 0; j--) if (columns[j] != rows[j]) for (i=0; i < (ssize_t) rank; i++) GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]); pivots=(ssize_t *) RelinquishMagickMemory(pivots); rows=(ssize_t *) RelinquishMagickMemory(rows); columns=(ssize_t *) RelinquishMagickMemory(columns); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x C o l u m n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixColumns() returns the number of columns in the matrix. % % The format of the GetMatrixColumns method is: % % size_t GetMatrixColumns(const MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return(matrix_info->columns); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x E l e m e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixElement() returns the specifed element in the matrix. % % The format of the GetMatrixElement method is: % % MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t x,const ssize_t y,void *value) % % A description of each parameter follows: % % o matrix_info: the matrix columns. % % o x: the matrix x-offset. % % o y: the matrix y-offset. % % o value: return the matrix element in this buffer. 
%
*/

/* Clamp a column index into [0, columns-1] (replicate-edge addressing). */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/* Clamp a row index into [0, rows-1] (replicate-edge addressing). */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/*
  Read 'length' bytes at byte 'offset' of the matrix backing file into
  'buffer'.  Uses pread() when available; otherwise serializes an
  lseek()+read() pair under the matrix semaphore so concurrent readers do
  not interleave seeks.  Returns the number of bytes read (possibly short
  on error), or -1 when the initial seek fails.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* Retry when interrupted by a signal; give up on any other error. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}

MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* Out-of-range coordinates are clamped to the nearest edge element. */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      /* Memory/map cache: elements are directly addressable. */
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  /* Disk cache: fetch one stride-sized element from the backing file. */
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t M a t r i x R o w s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMatrixRows() returns the number of rows in the matrix.
%
%  The format of the GetMatrixRows method is:
%
%      size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   L e a s t S q u a r e s A d d T e r m s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LeastSquaresAddTerms() adds one set of terms and associate results to the
%  given matrix and vectors for solving using least-squares function fitting.
%
%  The format of the AcquireMagickMatrix method is:
%
%      void LeastSquaresAddTerms(double **matrix,double **vectors,
%        const double *terms,const double *results,const size_t rank,
%        const size_t number_vectors);
%
%  A description of each parameter follows:
%
%    o matrix: the square matrix to add given terms/results to.
%
%    o vectors: the result vectors to add terms/results to.
%
%    o terms: the pre-calculated terms (without the unknown coefficent
%      weights) that forms the equation being added.
%
%    o results: the result(s) that should be generated from the given terms
%      weighted by the yet-to-be-solved coefficents.
%
%    o rank: the rank or size of the dimensions of the square matrix.
%      Also the length of vectors, and number of terms being added.
%
%    o number_vectors: Number of result vectors, and number or results being
%      added.  Also represents the number of separable systems of equations
%      that is being solved.
%
%  Example of use...
% % 2 dimensional Affine Equations (which are separable) % c0*x + c2*y + c4*1 => u % c1*x + c3*y + c5*1 => v % % double **matrix = AcquireMagickMatrix(3UL,3UL); % double **vectors = AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... % for each given x,y -> u,v % terms[0] = x; % terms[1] = y; % terms[2] = 1; % results[0] = u; % results[1] = v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % c5 = vectors[1][2]; % } % else % printf("Matrix unsolvable\n"); % RelinquishMagickMatrix(matrix,3UL); % RelinquishMagickMatrix(vectors,2UL); % */ MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors, const double *terms,const double *results,const size_t rank, const size_t number_vectors) { ssize_t i, j; for (j=0; j < (ssize_t) rank; j++) { for (i=0; i < (ssize_t) rank; i++) matrix[i][j]+=terms[i]*terms[j]; for (i=0; i < (ssize_t) number_vectors; i++) vectors[i][j]+=results[i]*terms[j]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a t r i x T o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MatrixToImage() returns a matrix as an image. The matrix elements must be % of type double otherwise nonsense is returned. % % The format of the MatrixToImage method is: % % Image *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o matrix_info: the matrix. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Elements narrower than a double cannot be interpreted meaningfully. */
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&min_value);
  max_value=min_value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      double
        value;

      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /* Map [min,max] onto the quantum range; degenerate ranges map to a
     constant (all-zero matrix scales to 0). */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
  */
  image=AcquireImage((ImageInfo *) NULL,exception);
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    Quantum
      *q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Unreadable elements leave the queued pixel untouched. */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      value=scale_factor*(value-min_value);
      *q=ClampToQuantum(value);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N u l l M a t r i x                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NullMatrix() sets all elements of the matrix to zero.
%
%  The format of the memset method is:
%
%      MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  ssize_t
    x;

  ssize_t
    count,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      /* Memory/map cache: zero the whole element buffer in one call. */
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Disk cache: zero the backing file one byte at a time.
    NOTE(review): the inner loop bound is matrix_info->length (the total
    byte length of the matrix) yet it runs once per row, so rows*length
    bytes are written in total — this looks like it should be one row
    (columns*stride) per pass; confirm against upstream before changing.
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < (ssize_t) matrix_info->length; x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    if (x < (ssize_t) matrix_info->length)
      break;
  }
  /* MagickFalse when any write came up short. */
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e l i n q u i s h M a g i c k M a t r i x                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RelinquishMagickMatrix() frees the previously acquired matrix (array of
%  pointers to arrays of doubles).
%
%  The format of the RelinquishMagickMatrix method is:
%
%      double **RelinquishMagickMatrix(double **matrix,
%        const size_t number_rows)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to relinquish
%
%    o number_rows: the first dimension of the acquired matrix (number of
%      pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  ssize_t
    i;

  /* NULL-safe: a NULL matrix is returned unchanged. */
  if (matrix == (double **) NULL )
    return(matrix);
  for (i=0; i < (ssize_t) number_rows; i++)
    matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
  matrix=(double **) RelinquishMagickMemory(matrix);
  return(matrix);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t M a t r i x E l e m e n t                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMatrixElement() sets the specified element in the matrix.
%
%  The format of the SetMatrixElement method is:
%
%      MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
%        const ssize_t x,const ssize_t y,void *value)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix columns.
%
%    o x: the matrix x-offset.
%
%    o y: the matrix y-offset.
%
%    o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    element_offset,
    written;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /*
    Reject coordinates outside the matrix; unlike GetMatrixElement() no
    edge clamping is applied on writes.
  */
  element_offset=(MagickOffsetType) y*matrix_info->columns+x;
  if (element_offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) (element_offset*matrix_info->stride) >=
      matrix_info->length)
    return(MagickFalse);
  if (matrix_info->type == DiskCache)
    {
      /* Disk cache: persist one stride-sized element to the backing file. */
      written=WriteMatrixElements(matrix_info,element_offset*
        matrix_info->stride,matrix_info->stride,(unsigned char *) value);
      return(written == (MagickOffsetType) matrix_info->stride ? MagickTrue :
        MagickFalse);
    }
  /* Memory/map cache: store the element directly. */
  (void) memcpy((unsigned char *) matrix_info->elements+element_offset*
    matrix_info->stride,value,matrix_info->stride);
  return(MagickTrue);
}
nested_parallel_for_irregular_omp.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See LICENSE.txt in top-level directory.
 */

/* Nested Pragma omp parallel for directive evaluation
 * Output: avg time
 */

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>

#define NUM_ELEMS 5017600 /* 2GB */
#define NUM_REPS 1

/* Deterministic, thread-safe pseudo-random workload size in [0,10000).
 * The original called rand() from inside the nested parallel loops; rand()
 * is not required to be thread-safe (C11 7.22.2.1), so that was a data race
 * that could also serialize threads and skew the benchmark.  A stateless
 * per-iteration hash keeps the workload irregular with no shared state. */
static int workload(unsigned int x)
{
    x = x * 1103515245u + 12345u; /* LCG step */
    x ^= x >> 16;
    return (int)(x % 10000u);
}

int main(int argc, char *argv[])
{
    int i, j, r, nthreads;
    double *time, avg_time = 0.0;

    /* Discover the default team size (outer level). */
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    /* argv: [1] element count, [2] inner thread count, [3] repetitions */
    int n = (argc > 1) ? atoi(argv[1]) : NUM_ELEMS;
    int in_th = (argc > 2) ? atoi(argv[2]) : nthreads;
    int rep = (argc > 3) ? atoi(argv[3]) : 3;
    int it = ceil(sqrt((double)n));
    srand(1983);
    n = it * it; /* round n to a perfect square: it x it iteration space */

    time = (double *)malloc(sizeof(double) * rep);
    if (time == NULL) { /* was unchecked */
        fprintf(stderr, "allocation failure\n");
        return EXIT_FAILURE;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel for
        for (j = 0; j < it; j++) {
            /* Size the nested (inner) parallel region. */
            omp_set_num_threads(in_th);
#pragma omp parallel for
            for (i = 0; i < it; i++) {
                int random = workload((unsigned int)(j * it + i));
                volatile int kk = 0;
                int k;
                for (k = 0; k < random; k++)
                    kk++;
                assert(kk == random);
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }
    avg_time /= rep;
    /* Output: outer-threads inner-threads n avg-seconds */
    printf("%d %d %d %f\n", nthreads, in_th, n, avg_time);
    free(time);
    return EXIT_SUCCESS;
}
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"

/*
  Define declarations.
*/
#define MaxPSDChannels  56
/* Round a byte count up to the next even value (PSD pads rows/blocks to
   2-byte boundaries). */
#define PSDQuantum(x)  (((ssize_t) (x)+1) & -2)

/*
  Enumerated declarations.
*/
/* PSD channel-data compression methods. */
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;

/* PSD color modes as stored in the file header. */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;

/*
  Typedef declarations.
*/
/* Per-channel descriptor of a PSD layer record: the channel type id and
   the byte count of its stored pixel data. */
typedef struct _ChannelInfo
{
  short
    type;

  size_t
    size;
} ChannelInfo;

/* Layer-mask sub-record: the decoded mask image, its placement, the
   default background value, and the raw PSD mask flags. */
typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;

/* One PSD layer record: channel table, blend key, decoded image, mask,
   opacity, placement, and assorted raw layer attributes. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P S D                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD()() returns MagickTrue if the image format type, identified by the
%  magick string, is PSD.
%
%  The format of the IsPSD method is:
%
%      MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /* A PSD file starts with the 4-byte signature "8BPS". */
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d P S D I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPSDImage() reads an Adobe Photoshop image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Map an ImageMagick composite operator to the 4-character PSD blend-mode
   key; the key is byte-reversed on little-endian hosts. */
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}

/*
  For some reason Photoshop seems to blend semi-transparent pixels with
  white.  This method reverts the blending.  This can be disabled by setting
  the option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* Only meaningful for blended-alpha sRGB images. */
  if (image->alpha_trait != BlendPixelTrait ||
      image->colorspace != sRGBColorspace)
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /* Fully opaque/transparent pixels need no unblending. */
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}

/* Translate a PSD compression code into the generic CompressionType. */
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  switch (compression)
  {
    case RLE:
      return RLECompression;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
      return ZipCompression;
    default:
      return NoCompression;
  }
}

/*
  Scale every pixel's alpha by the layer opacity (revert == MagickFalse),
  or divide it back out (revert != MagickFalse).
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  /* Fully opaque layers need no adjustment. */
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}

/*
  Modulate the image's alpha channel by the intensity of the layer's
  opacity mask (or divide it back out when revert != MagickFalse).  The
  mask is first composited onto a full-size canvas filled with the mask
  background so partial masks cover the whole layer.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  /* No alpha channel means there is nothing to mask. */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char ) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t 
number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 
0U : 255U;
            /* depth 1: each source byte expands to 8 bitmap pixels
               (set bit -> 0 / black, clear bit -> 255 / white). */
            *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
            *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
            *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
            break;
          }
          case 2:
          {
            /* depth 2: four 2-bit samples per source byte. */
            CheckNumberPixels(4);
            *pixels++=(*compact_pixels >> 6) & 0x03;
            *pixels++=(*compact_pixels >> 4) & 0x03;
            *pixels++=(*compact_pixels >> 2) & 0x03;
            *pixels++=(*compact_pixels & 0x03) & 0x03;
            break;
          }
          case 4:
          {
            /* depth 4: two nibbles per source byte. */
            CheckNumberPixels(2);
            *pixels++=(*compact_pixels >> 4) & 0xff;
            *pixels++=(*compact_pixels & 0x0f) & 0xff;
            break;
          }
          default:
          {
            /* depth 8 (and the depth==1 sentinel path's fallback):
               copy the byte through unchanged. */
            CheckNumberPixels(1);
            *pixels++=(*compact_pixels);
            break;
          }
        }
        compact_pixels++;
      }
  }
  return(i);
}

/* Release every image/mask/info attached to each layer, then the layer
   array itself; always returns NULL for convenient assignment. */
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    i;

  for (i=0; i<number_layers; i++)
  {
    if (layer_info[i].image != (Image *) NULL)
      layer_info[i].image=DestroyImage(layer_info[i].image);
    if (layer_info[i].mask.image != (Image *) NULL)
      layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
    if (layer_info[i].info != (StringInfo *) NULL)
      layer_info[i].info=DestroyStringInfo(layer_info[i].info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}

/* Bytes per sample in the PSD stream: 2 for >256-color palettes and
   16-bit depth, 4 for 32-bit depth, otherwise 1. */
static inline size_t GetPSDPacketSize(const Image *image)
{
  if (image->storage_class == PseudoClass)
    {
      if (image->colors > 256)
        return(2);
    }
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}

/* Read a length field: 32-bit in PSD version 1, 64-bit in version 2 (PSB). */
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version == 1)
    return((MagickSizeType) ReadBlobLong(image));
  return((MagickSizeType) ReadBlobLongLong(image));
}

/* Bytes per decoded row; 1-bit data is packed 8 pixels per byte. */
static inline size_t GetPSDRowSize(Image *image)
{
  if (image->depth == 1)
    return(((image->columns+7)/8)*GetPSDPacketSize(image));
  else
    return(image->columns*GetPSDPacketSize(image));
}

/* Human-readable name of a PSD color mode, for logging. */
static const char *ModeToString(PSDImageType type)
{
  switch (type)
  {
    case BitmapMode: return "Bitmap";
    case GrayscaleMode: return "Grayscale";
    case IndexedMode: return "Indexed";
    case RGBMode: return "RGB";
    case CMYKMode: return "CMYK";
    case
MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image,ExceptionInfo *exception) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* Resolution info. 
*/ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.x); (void) SetImageProperty(image,"tiff:XResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.y); (void) SetImageProperty(image,"tiff:YResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) *has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if 
(LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; if (type == 0) { if (packet_size == 1) SetPixelIndex(image,ScaleQuantumToChar(pixel),q); else SetPixelIndex(image,ScaleQuantumToShort(pixel),q); } color=image->colormap+(ssize_t) ConstrainColormapIndex(image, (ssize_t) GetPixelIndex(image,q),exception); if ((type == 0) && (channels > 1)) return; else color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if 
((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) { status=MagickFalse; break; } status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) 
AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) 
compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { p=pixels; while (count > 0) { length=image->columns; while (--length) { if (packet_size == 2) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; } /* else if (packet_size == 4) { TODO: Figure out what to do there. } */ else *(p+1)+=*p; p+=packet_size; } p+=packet_size; count-=row_size; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != 
(Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. */ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); /* TODO: Remove this when we figure out how to support this */ if ((compression == ZipWithPrediction) && (image->depth == 32)) { (void) ThrowMagickException(exception,GetMagickModule(), TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)"); return(MagickFalse); } 
layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { register ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType 
PSDSkipImage(const ImageInfo *image_info, const size_t index) { if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,(size_t) count); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } if (size == 0) return(MagickTrue); layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].page.x=(ssize_t) ReadBlobSignedLong(image); y=(ssize_t) ReadBlobSignedLong(image); x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) 
layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", 
layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. */ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(image_info,i) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if 
(IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}

/*
  ReadPSDMergedImage() reads the flattened composite image that follows the
  layer data: a 2-byte compression code, then the pixel data for every
  channel in sequence (all rows of channel 0, then channel 1, ...).  Only
  Raw and RLE compression are handled here; any other code raises a
  TypeWarning and returns MagickFalse.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /* Skip the composite when the caller requested specific scenes only. */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /*
        RLE pixel data is preceded by one packed-scanline byte count per
        row per channel.
      */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /*
      In a 2-channel file the second channel is treated as alpha
      (channel type -1).
    */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* Negate CMYK channels after decode, then correct pre-blended alpha. */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}

/*
  ReadPSDImage() decodes a PSD or PSB blob into an image list.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;
MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t imageListLength; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g 
with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,(size_t) (psd_info.depth < 16 ? 256 : 65536),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); } if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 
32 bits per pixel; the colormap is ignored. */ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(image,blocks,(size_t) length, &has_merged_image,exception); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. 
*/ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); imageListLength=GetImageListLength(image); if ((has_merged_image != MagickFalse) || (imageListLength == 1)) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (imageListLength == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (imageListLength == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(image_info,i++) == MagickFalse) (void) SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r P S D I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to
%  the list of supported formats.  The properties include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  static const char
    *format_names[2] = { "PSB", "PSD" },
    *format_notes[2] =
    {
      "Adobe Large Document Format",
      "Adobe Photoshop bitmap"
    };

  ssize_t
    n;

  /*
    PSB and PSD share the same coder entry points; register each format
    tag in turn with identical read/write handlers and seekable-stream
    requirements.
  */
  for (n=0; n < 2; n++)
  {
    MagickInfo
      *entry;

    entry=AcquireMagickInfo("PSD",format_names[n],format_notes[n]);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    entry->flags|=CoderEncoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SetPSDOffset() writes a row-offset field at the current blob position:
  16 bits wide for PSD (version 1), 32 bits wide for PSB (version 2).
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/*
  WritePSDOffset() back-patches a previously emitted offset field: it seeks
  to `offset', rewrites the 16/32-bit field with `size', and restores the
  original blob position before returning the write result.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  SetPSDSize() writes a length field at the current blob position:
  32 bits wide for PSD (version 1), 64 bits wide for PSB (version 2).
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobLong(image,(unsigned int) size));
  return(WriteBlobLongLong(image,size));
}

/*
  WritePSDSize() back-patches a previously emitted size field at `offset'
  (via SetPSDSize semantics), preserving the current blob position.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void)
SeekBlob(image,offset,SEEK_SET); result=SetPSDSize(psd_info,image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. */ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. 
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length=(size_t) WriteBlobShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length=(size_t) WriteBlobShort(image,ZipWithoutPrediction); #endif else length=(size_t) WriteBlobShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, const CompressionType compression,ExceptionInfo *exception) { MagickBooleanType monochrome; QuantumInfo *quantum_info; register const Quantum *p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE #define CHUNK 16384 int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK, sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) CHUNK; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) CHUNK-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) 
RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsImageGray(next_image) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, (ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 
2 : 4)); } size_offset+=2; if (next_image->storage_class == PseudoClass) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != UndefinedPixelTrait) { 
length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  /* Pad the string (length byte included) to a multiple of `padding'. */
  length++;
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/*
  WriteResolutionResourceBlock() emits the 8BIM image resource 0x03ED
  (16 bytes of payload) carrying the horizontal and vertical resolution as
  16.16 fixed-point values plus their display units (1 when the image
  resolution is expressed per inch, 2 when per centimeter).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to pixels/inch before 16.16 fixed-point scaling. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}

/*
  WriteChannelSize() writes a layer channel-info record: the signed channel
  id (negative ids denote alpha/mask channels at the call sites) followed by
  a zero size placeholder that is patched later once the channel data length
  is known.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}

/*
  RemoveICCProfileFromResourceBlock() scans the 8BIM image-resource blocks
  in `bim_profile' and splices out the ICC profile resource (id 0x040F),
  shrinking the StringInfo in place.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Each iteration consumes one "8BIM" + id + name + counted-data block. */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if
(LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", 
/* Tail of GetAdditionalInformation (the function opens before this chunk):
   filters the "psd:additional-info" profile so that only records with an
   allowed 4-byte key survive. */
"lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0;
/* Walk the 12-byte record headers (4-byte signature, 4-byte key, 4-byte
   big-endian size); unrecognized records are compacted out of the buffer
   with memmove, accepted ones are counted into `length`. */
while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length);
/* NOTE(review): the profile is re-registered from `info`, which was the
   profile removed above -- presumably this should register the truncated
   `profile` instead; confirm against the MagickCore profile API. */
(void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); }
/* Writes the PSD layer-info section for every image in the list: one
   per-layer record (bounds, channel sizes, blend mode, opacity, flags,
   optional mask, name, additional info) followed by the channel pixel data.
   On success stores the section size in *layers_size (when non-NULL). */
static MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); }
/* A negative layer count flags that the first alpha channel holds the merged
   transparency. */
if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image);
/* Channel ids -1 and -2 are the PSD codes for the alpha and user-mask
   channels respectively. */
for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; }
/* The extra-data length: Pascal name padded to 4 bytes, plus mask record
   (20 bytes) and additional-info block when present. */
name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size;
/* The section size must be padded to an even byte count. */
if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); }
/* Public entry point: coder-policy check, then delegates to
   WritePSDLayersInternal (discarding the section size). */
ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); }
/* Writes a complete PSD/PSB file: header, mode data, image resources,
   the layer section and finally the composite image. */
static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file.
*/
assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status);
/* Bytes per composite pixel: 3 (or 6 at >8-bit depth), plus 1/2 for alpha. */
packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1;
/* PSB (version 2) is required beyond the 30000-pixel PSD dimension limit. */
psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } }
/* Color-mode data: 768-byte palette for indexed images, otherwise empty. */
if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); }
/* Resource 0x040F: embedded ICC profile, padded to an even byte count. */
if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); }
/* Layer section: reserve the size field, write the layers, then back-patch
   the size. */
if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
ej4.c
/*
 * ej4.c - fills a vector with random values and finds its largest and
 * smallest elements, each search running in its own OpenMP section.
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>

#define TAM 100

/* Fills v[0..TAM-1] with pseudo-random values in [0, 999]. */
void asignarValores(float *v)
{
    for (int i = 0; i < TAM; ++i)
        v[i] = rand() % 1000;
}

/* Prints the largest element of v.
 *
 * Bug fix: the original declared `res` firstprivate/lastprivate, so each
 * thread reduced only its own chunk and lastprivate kept the value of the
 * thread that executed the LAST iterations - wrong with more than one
 * thread (the `critical` only guarded a thread-private variable, doing
 * nothing). A max-reduction combines every thread's partial result
 * correctly and removes the per-iteration critical section. */
void obtenerMayor(float *v)
{
    float res = 0;
    #pragma omp parallel for reduction(max:res)
    for (int i = 0; i < TAM; ++i) {
        if (v[i] > res)
            res = v[i];
    }
    printf("\nEl valor mas grande del vector es: %f", res);
}

/* Prints the smallest element of v (same reduction fix as obtenerMayor). */
void obtenerMenor(float *v)
{
    float res = 10000;
    #pragma omp parallel for reduction(min:res)
    for (int i = 0; i < TAM; ++i) {
        if (v[i] < res)
            res = v[i];
    }
    printf("\nEl valor mas pequenyo del vector es: %f", res);
}

int main()
{
    float *v = (float *)malloc(sizeof(float) * TAM);
    if (v == NULL) {   /* bug fix: check the allocation before using it */
        fprintf(stderr, "malloc failed\n");
        return 1;
    }
    asignarValores(v);
    double start = omp_get_wtime();
    /* Run both searches concurrently, one per section. */
    #pragma omp parallel sections num_threads(2)
    {
        #pragma omp section
        {
            obtenerMayor(v);
        }
        #pragma omp section
        {
            obtenerMenor(v);
        }
    }
    printf("\n-------------------------------------------\nTiempo de ejecucion del programa %lfs\n-------------------------------------------\n", omp_get_wtime()-start);
    free(v);   /* bug fix: release the vector */
    return 0;
}
ompfor-default.c
/*
 * default loop scheduling
 *
 * Demonstrates the implementation-default schedule of `#pragma omp for`:
 * each iteration reports which thread executed it.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Bug fix: the include was guarded by _OPENMP but the calls below were
 * not, so a non-OpenMP build failed on the undeclared omp_* functions.
 * Provide serial fallbacks so the demo builds everywhere. */
static int omp_get_num_threads(void) { return 1; }
static int omp_get_thread_num(void)  { return 0; }
#endif

int main(void)
{
  int i,j;

#pragma omp parallel
  {
    /* Only one thread announces the team size. */
#pragma omp single
    printf ("Using %d threads.\n",omp_get_num_threads());

    /* j must be private: every thread records its own id. */
#pragma omp for private(j)
    for (i=0;i<10;i++)
      {
        j = omp_get_thread_num();
        printf("Iteration %d, by thread %d\n", i, j);
      }
  }
  return 0;
}
deflated_gmres_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_DEFLATED_GMRES_SOLVER_H_INCLUDED ) #define KRATOS_DEFLATED_GMRES_SOLVER_H_INCLUDED // System includes #include <string> #include <iostream> #include <fstream> #include <sstream> #include <cstddef> // External includes // Project includes #include "includes/define.h" #include "reorderer.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "linear_solvers/iterative_solver.h" #include <boost/numeric/ublas/vector.hpp> #include "utilities/openmp_utils.h" //#define NO_PRECOND namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** This solver is designed for the solution of mixed U-P problems. 
* It uses a block structure diving the matrix in UU PP UP PU blocks
* and uses "standard" linear solvers for the different blocks as well as a GMRES for the outer part */
template<class TSparseSpaceType, class TDenseSpaceType,
         class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>,
         class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class DeflatedGMRESSolver : public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DeflatedGMRESSolver
    KRATOS_CLASS_POINTER_DEFINITION (DeflatedGMRESSolver);

    typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType;
    typedef typename TSparseSpaceType::MatrixType SparseMatrixType;
    typedef typename TSparseSpaceType::VectorType VectorType;
    typedef typename TDenseSpaceType::MatrixType DenseMatrixType;
    typedef typename TDenseSpaceType::VectorType DenseVectorType;
    typedef std::size_t SizeType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    /// @param pred_solver solver used for the deflated (coarse) pressure system before the GMRES loop.
    /// @param NewMaxTolerance GMRES relative tolerance.
    /// @param NewMaxIterationsNumber maximum number of outer iterations.
    /// @param m Krylov space size before restart.
    /// @param max_reduced_size maximum size of the deflated matrix.
    /// Side effect: opens "iterations.txt" for the iteration log.
    DeflatedGMRESSolver (typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer pred_solver,
                         double NewMaxTolerance,
                         unsigned int NewMaxIterationsNumber,
                         unsigned int m,
                         unsigned int max_reduced_size
                        ) : BaseType (NewMaxTolerance, NewMaxIterationsNumber)
    {
        //saving the linear solvers to be used in the solution process
        //mpsolver_UU_block = psolver_UU_block;
        //mpsolver_PP_block = psolver_PP_block;
        //this is the solver used at the prediction step before entering the GMRES loop... can be direct or iterative
        mPred_solver = pred_solver;
        mBlocksAreAllocated = false;
        mis_initialized = false;
        mm = m;
        mmax_reduced_size=max_reduced_size;
        KRATOS_WATCH("Quasi-deflated solver created")
        std::cout<<"Krylov space size is"<< mm<<std::endl;
        std::cout<<"Maximum deflated matrix size is"<< mmax_reduced_size<<std::endl;
        myfile.open("iterations.txt");
    }

    /// Copy constructor. Deliberately not implemented: always throws.
    DeflatedGMRESSolver (const DeflatedGMRESSolver& Other)
    {
        KRATOS_THROW_ERROR (std::logic_error,"copy constructor not correctly implemented","");
    }

    /// Destructor.
    ~DeflatedGMRESSolver() override {}

    ///@}
    ///@name Operators
    ///@{

    /// Assignment operator.
    /// NOTE(review): does not copy anything from Other -- confirm this is intentional.
    DeflatedGMRESSolver& operator= (const DeflatedGMRESSolver& Other)
    {
        return *this;
    }

    ///@}
    ///@name Operations
    ///@{

    /** This function is designed to be called as few times as possible. It creates the data structures
     * that only depend on the connectivity of the matrix (and not on its coefficients)
     * so that the memory can be allocated once and expensive operations can be done only when strictly
     * needed
    @param rA. System matrix
    @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
    @param rB. Right hand side vector.
    */
    void Initialize (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
        if (mBlocksAreAllocated == true)
        {
            mis_initialized = true;
        }
        else
        {
            std::cout << "linear solver intialization is deferred to the moment at which blocks are available" << std::endl;
        }
    }

    /** This function is designed to be called every time the coefficients change in the system
     * that is, normally at the beginning of each solve.
     * For example if we are implementing a direct solver, this is the place to do the factorization
     * so that then the backward substitution can be performed effectively more than once
    @param rA. System matrix
    @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
    @param rB. Right hand side vector.
    */
    void InitializeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
        //copy to local matrices
        if (mBlocksAreAllocated == false)
        {
            FillBlockMatrices (true, rA, mK, mG, mD, mS);
            mBlocksAreAllocated = true;
        }
        else
        {
            FillBlockMatrices (false, rA, mK, mG, mD, mS);
            mBlocksAreAllocated = true;
        }
        if(mis_initialized == false) this->Initialize(rA,rX,rB);
    }

    /** This function actually performs the solution work, eventually taking advantage of what was done before in the
     * Initialize and InitializeSolutionStep functions.
    @param rA. System matrix
    @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
    @param rB. Right hand side vector.
    */
    void PerformSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
        unsigned int m = mm;
        unsigned int max_iter = BaseType::GetMaxIterationsNumber();
        double tol = BaseType::GetTolerance();
        gmres_solve (rA,rX,rB,m,max_iter,tol);
    }

    /** This function is designed to be called at the end of the solve step.
     * for example this is the place to remove any data that we do not want to save for later
    @param rA. System matrix
    @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
    @param rB. Right hand side vector.
    */
    void FinalizeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
    }

    /** This function is designed to clean up all internal data in the solver.
     * Clear is designed to leave the solver object as if newly created.
     * After a clear a new Initialize is needed
     */
    void Clear() override
    {
        mK.clear();
        mG.clear();
        mD.clear();
        mS.clear();
        mBlocksAreAllocated = false;
        mPred_solver->Clear();
        mu.clear();
        mp.clear();
        mru.clear();
        mrp.clear();
        mis_initialized = false;
    }

    /** Normal solve method.
    Solves the linear system Ax=b and puts the result on SystemVector& rX.
    rVectorx is also th initial guess for iterative methods.
    @param rA. System matrix
    @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
    @param rB. Right hand side vector.
    */
    /// NOTE(review): always returns false regardless of convergence -- confirm callers ignore the result.
    bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
        if (mis_initialized == false)
            this->Initialize (rA,rX,rB);
        this->InitializeSolutionStep (rA,rX,rB);
        this->PerformSolutionStep (rA,rX,rB);
        this->FinalizeSolutionStep (rA,rX,rB);
        return false;
    }

    /** Multi solve method for solving a set of linear systems with same coefficient matrix.
    Solves the linear system Ax=b and puts the result on SystemVector& rX.
    rVectorx is also th initial guess for iterative methods.
    @param rA. System matrix
    @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
    @param rB. Right hand side vector.
    */
    /// Not implemented: always reports failure.
    bool Solve (SparseMatrixType& rA, DenseMatrixType& rX, DenseMatrixType& rB) override
    {
        return false;
    }

    /** Eigenvalue and eigenvector solve method for derived eigensolvers */
    /// Not implemented: no-op.
    void Solve (SparseMatrixType& K, SparseMatrixType& M, DenseVectorType& Eigenvalues, DenseMatrixType& Eigenvectors) override {}

    /** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
     * when solving a mixed u-p problem, it is important to identify the row associated to v and p.
     * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
     * which require knowledge on the spatial position of the nodes associated to a given dof.
     * This function tells if the solver requires such data
     */
    bool AdditionalPhysicalDataIsNeeded() override
    {
        return true;
    }

    /** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
     * when solving a mixed u-p problem, it is important to identify the row associated to v and p.
     * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
     * which require knowledge on the spatial position of the nodes associated to a given dof.
     * This function is the place to eventually provide such data
     */
    /// Builds the pressure/other dof index maps used to split rA into blocks.
    void ProvideAdditionalData (
        SparseMatrixType& rA,
        VectorType& rX,
        VectorType& rB,
        typename ModelPart::DofsArrayType& rdof_set,
        ModelPart& r_model_part
    ) override
    {
        //count pressure dofs
        unsigned int n_pressure_dofs = 0;
        unsigned int tot_active_dofs = 0;
        for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++)
        {
            if (it->EquationId() < rA.size1())
            {
                tot_active_dofs += 1;
                if (it->GetVariable().Key() == PRESSURE)
                    n_pressure_dofs += 1;
            }
        }
        if (tot_active_dofs != rA.size1() )
            KRATOS_THROW_ERROR (std::logic_error,"total system size does not coincide with the free dof map","")
        //resize arrays as needed
        mpressure_indices.resize (n_pressure_dofs,false);
        unsigned int other_dof_size = tot_active_dofs - n_pressure_dofs;
        mother_indices.resize (other_dof_size,false);
        mglobal_to_local_indexing.resize (tot_active_dofs,false);
        mis_pressure_block.resize (tot_active_dofs,false);
        //construct aux_lists as needed
        //"other_counter[i]" i will contain the position in the global system of the i-th NON-pressure node
        //"pressure_counter[i]" will contain the in the global system of the i-th NON-pressure node
        //
        //mglobal_to_local_indexing[i] will contain the position in the local blocks of the
        unsigned int pressure_counter = 0;
        unsigned int other_counter = 0;
        unsigned int global_pos = 0;
        for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++)
        {
            if (it->EquationId() < rA.size1())
            {
                if (it->GetVariable().Key() == PRESSURE)
                {
                    mpressure_indices[pressure_counter] = global_pos;
                    mglobal_to_local_indexing[global_pos] = pressure_counter;
                    mis_pressure_block[global_pos] = true;
                    pressure_counter++;
                }
                else
                {
                    mother_indices[other_counter] = global_pos;
                    mglobal_to_local_indexing[global_pos] = other_counter;
                    mis_pressure_block[global_pos] = false;
                    other_counter++;
                }
                global_pos++;
            }
        }
    }

    /* void ProvideAdditionalData ( SparseMatrixType& rA, VectorType& rX, VectorType& rB, typename ModelPart::DofsArrayType& rdof_set, ModelPart& r_model_part ) { //count pressure dofs unsigned int n_pressure_dofs = 0; unsigned int tot_active_dofs = 0; for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++) // if (it->IsFixed() != true) { tot_active_dofs += 1; if (it->GetVariable().Key() == PRESSURE) n_pressure_dofs += 1; } //KRATOS_WATCH(rA.size1()) //KRATOS_WATCH(tot_active_dofs) // if (tot_active_dofs != rA.size1() ) // KRATOS_THROW_ERROR (std::logic_error,"total system size does not coincide with the free dof map","") //resize arrays as needed mpressure_indices.resize (n_pressure_dofs,false); unsigned int other_dof_size = tot_active_dofs - n_pressure_dofs; mother_indices.resize (other_dof_size,false); mglobal_to_local_indexing.resize (tot_active_dofs,false); mis_pressure_block.resize (tot_active_dofs,false); //construct aux_lists as needed //"other_counter[i]" i will contain the position in the global system of the i-th NON-pressure node //"pressure_counter[i]" will contain the in the global system of the i-th NON-pressure node // //mglobal_to_local_indexing[i] will contain the position in the local blocks of the unsigned int pressure_counter = 0; unsigned int other_counter = 0; unsigned int global_pos = 0; for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++) { // if (it->IsFixed() != true) // { if (it->GetVariable().Key() == PRESSURE) { mpressure_indices[pressure_counter] = global_pos; mglobal_to_local_indexing[global_pos] = pressure_counter; mis_pressure_block[global_pos] = true; pressure_counter++; } else { mother_indices[other_counter] = global_pos; mglobal_to_local_indexing[global_pos] = other_counter; mis_pressure_block[global_pos] = false; other_counter++; } global_pos++; // } } } */

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
std::string Info() const override { return "Linear solver"; } /// Print information about this object. void PrintInfo (std::ostream& rOStream) const override { rOStream << "Linear solver"; } /// Print object's data. void PrintData (std::ostream& rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///this function generates the subblocks of matrix A ///as A = ( K G ) u /// ( D S ) p /// subblocks are allocated or nor depending on the value of "need_allocation" void FillBlockMatrices (bool need_allocation, SparseMatrixType& rA, SparseMatrixType& K, SparseMatrixType& G, SparseMatrixType& D, SparseMatrixType& S ) { KRATOS_TRY KRATOS_WATCH("FILLING BLOCK MATRICES") //get access to A data const std::size_t* index1 = rA.index1_data().begin(); const std::size_t* index2 = rA.index2_data().begin(); const double* values = rA.value_data().begin(); SparseMatrixType L(mpressure_indices.size(),mpressure_indices.size() ); if (need_allocation == true) { K.clear(); G.clear(); D.clear(); S.clear(); L.clear(); //do allocation K.resize (mother_indices.size() ,mother_indices.size() ); G.resize (mother_indices.size() ,mpressure_indices.size() ); D.resize (mpressure_indices.size(),mother_indices.size() ); S.resize (mpressure_indices.size(),mpressure_indices.size() ); mrp.resize(mpressure_indices.size() ); mru.resize(mother_indices.size() ); mp.resize(mpressure_indices.size()); mu.resize(mother_indices.size()); //KRATOS_WATCH (mglobal_to_local_indexing); //allocate the blocks by push_back for (unsigned int i=0; i<rA.size1(); i++) { unsigned int row_begin = index1[i]; unsigned int row_end = index1[i+1]; unsigned int local_row_id = mglobal_to_local_indexing[i]; if ( mis_pressure_block[i] == false) //either K or G { for (unsigned int j=row_begin; j<row_end; j++) { unsigned int col_index = index2[j]; double value = values[j]; unsigned 
int local_col_id = mglobal_to_local_indexing[col_index]; if (mis_pressure_block[col_index] == false) //K block K.push_back ( local_row_id, local_col_id, value); else //G block G.push_back ( local_row_id, local_col_id, value); } } else //either D or S { for (unsigned int j=row_begin; j<row_end; j++) { unsigned int col_index = index2[j]; double value = values[j]; unsigned int local_col_id = mglobal_to_local_indexing[col_index]; if (mis_pressure_block[col_index] == false) //D block D.push_back ( local_row_id, local_col_id, value); else //S block L.push_back ( local_row_id, local_col_id, value); } } } S = L; VectorType diagK (mother_indices.size() ); ComputeDiagonalByLumping (K,diagK); } else //allocation is not needed so only do copying { for (unsigned int i=0; i<rA.size1(); i++) { unsigned int row_begin = index1[i]; unsigned int row_end = index1[i+1]; unsigned int local_row_id = mglobal_to_local_indexing[i]; if ( mis_pressure_block[i] == false ) //either K or G { for (unsigned int j=row_begin; j<row_end; j++) { unsigned int col_index = index2[j]; double value = values[j]; unsigned int local_col_id = mglobal_to_local_indexing[col_index]; if (mis_pressure_block[col_index] == false) //K block K( local_row_id, local_col_id) = value; else //G block G( local_row_id, local_col_id) = value; } } else //either D or S { for (unsigned int j=row_begin; j<row_end; j++) { unsigned int col_index = index2[j]; double value = values[j]; unsigned int local_col_id = mglobal_to_local_indexing[col_index]; if (mis_pressure_block[col_index] == false) //D block D( local_row_id, local_col_id) = value; else //S block L( local_row_id, local_col_id) = value; } } } S = L; VectorType diagK (mother_indices.size() ); ComputeDiagonalByLumping (K,diagK); } KRATOS_CATCH ("") } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name 
Member Variables ///@{ /// A counted pointer to the reorderer object. //typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpsolver_UU_block; //typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpsolver_PP_block; typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mPred_solver; unsigned int mm; unsigned int mmax_reduced_size; bool mBlocksAreAllocated; bool mis_initialized; DenseVector<unsigned int> mpressure_indices; DenseVector<unsigned int> mother_indices; DenseVector<int> mglobal_to_local_indexing; DenseVector<int> mis_pressure_block; SparseMatrixType mK; SparseMatrixType mG; SparseMatrixType mD; SparseMatrixType mS; VectorType mrp; VectorType mru; VectorType mp; VectorType mu; std::ofstream myfile; ///@} ///@name Private Operators ///@{ inline void GeneratePlaneRotation (const double &dx, const double &dy, double &cs, double &sn) { if (dy == 0.0) { cs = 1.0; sn = 0.0; } else if (dx == 0.0) { cs = 0.0; sn = 1.0; } else { const double rnorm = 1.0/sqrt (dx*dx + dy*dy); cs = fabs (dx) * rnorm; sn = cs * dy / dx; } } inline void ApplyPlaneRotation (double &dx, double &dy, const double &cs, const double &sn) { double temp = cs * dx + sn * dy; dy = cs * dy - sn * dx; dx = temp; } void Update (VectorType& y, VectorType& x, int k, Matrix& h, VectorType& s, std::vector< VectorType >& V) { for (unsigned int i=0; i<s.size(); i++) y[i] = s[i]; /* for(unsigned int i=s.size(); i<y.size(); i++) y[i] = 0.0;*/ // Backsolve: for (int i = k; i >= 0; --i) { y (i) /= h (i,i); for (int j = i - 1; j >= 0; --j) y (j) -= h (j,i) * y (i); } //create new search dir for (int j = 0; j <= k; ++j) TSparseSpaceType::UnaliasedAdd (x, y[j], V[j]); // x += y(j)* V[j]; } int gmres_solve ( SparseMatrixType& A, VectorType& x, const VectorType& b, unsigned int& m, unsigned int& max_iter, double& tol) { const unsigned int dim = A.size1(); if (m == 0) KRATOS_THROW_ERROR (std::logic_error,"the dimension of the 
GMRES krylov space can not be set to zero. Please change the value of m","") if (m > max_iter) m = max_iter; //KRATOS_WATCH("Krylov space size") //KRATOS_WATCH(m) VectorType s (m+1), sn (m+1), w (dim), r (dim), y (m+1); /////////THINGS NECESSARY FOR DEFLATION/////////////////////////////////////////////////////////////////////////// SparseMatrixType S_deflated; // //THIS ww is the matrix W in vector form // std::vector<int> W; // // DeflationUtils::ConstructW(mmax_reduced_size, mS, W, S_deflated); // DeflationUtils::FillDeflatedMatrix(mS, W, S_deflated); // int red_dim=S_deflated.size1(); // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// VectorType cs (m+1); Matrix H (m+1, m+1); int restart = 0; int p_dim=mS.size1(); VectorType output(p_dim);//WT*lambda VectorType temp (dim,0.0); double normb = TSparseSpaceType::TwoNorm (b); /*KRATOS_WATCH(normb);*/ if (normb < 1e-16) //ARBITRARY SMALL NUMBER! { normb = 1e-16; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// //get the residual //r = b - Ax TSparseSpaceType::Mult (A,x,r); TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r //CHECKING IF THE MATRIX IS INVERTIBLE!!!! If it is not (i.e. if S_deflated*Identity=0, we add a number to diagonal) //CheckDeflatedMatrix(S_deflated); #ifndef NO_PRECOND KRATOS_WATCH("SOLVING DEFLATED PRESSURE") SolveDeflatedPressure( output, r, S_deflated, W); KRATOS_WATCH("SOLVED DEFLATED PRESSURE") //update x: by modifying its part corresponding to pressure WritePPart (temp, output); TSparseSpaceType::ScaleAndAdd(1.00, temp, 1.00, x); TSparseSpaceType::Mult (A,x,r); TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r //KRATOS_WATCH(r) #endif const double rel_tol = tol*normb; double beta = TSparseSpaceType::TwoNorm (r); if (beta <= rel_tol) //finalize! 
{ tol = beta / normb; max_iter = 0; return 0; } unsigned int j; int err = 0; std::vector< VectorType > V (m+1); for (j = 0; j <= m; ++j) V[j].resize (dim,false); j = 1; while (j <= max_iter) { TSparseSpaceType::Assign (V[0], 1.0/beta, r); //V[0] = r /(T)beta; TSparseSpaceType::SetToZero (s); s[0] = beta; for (unsigned int i = 0; (i < m) && (j <= max_iter); ++i, ++j) { TSparseSpaceType::Mult (A,V[i],w); //w = A*V[i]; for (unsigned int k = 0; k <= i; k++) { H (k, i) = TSparseSpaceType::Dot (V[k], w); w -= H (k, i) * V[k]; } #ifndef NO_PRECOND Modify_w( w, W, dim, p_dim, red_dim); #endif const double normw = TSparseSpaceType::TwoNorm (w); H (i+1, i) = normw; // This breakdown is a good one ... if (normw == 0) TSparseSpaceType::Copy (V[i+1], w); //V[i+1] = w; else TSparseSpaceType::Assign (V[i+1], 1.0/normw, w); //V[i+1] = w / normw; for (unsigned int k = 0; k < i; k++) ApplyPlaneRotation (H (k,i), H (k+1,i), cs (k), sn (k) ); GeneratePlaneRotation (H (i,i), H (i+1,i), cs (i), sn (i) ); ApplyPlaneRotation (H (i,i), H (i+1,i), cs (i), sn (i) ); ApplyPlaneRotation (s (i), s (i+1), cs (i), sn (i) ); beta = fabs (s (i+1) ); std::cout << "iter = " << j << " estimated res ratio = " << beta << std::endl; //KRATOS_WATCH (beta); if (beta <= rel_tol) { this->Update (y, x, i, H, s, V); //WRITE THE NUMBER OF ITERATIONS INTO A FILE myfile <<j<<"\n"; return 0; } //IF WE SURPASS THE MAX ITERATION NUMBER WE WILL ALSO PRINT IT TO FILE else if (j>=max_iter) myfile <<j<<"\n"; } this->Update (y,x, m - 1, H, s, V); //r = b - Ax TSparseSpaceType::Mult (A,x,r); TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r beta = TSparseSpaceType::TwoNorm (r); std::cout << "number of iterations at convergence = " << j << std::endl; if (beta < rel_tol) { return 0; } ++restart; } err = 1; return err; } void CheckDeflatedMatrix(SparseMatrixType& S_deflated) { std::size_t reduced_size = S_deflated.size1(); VectorType identity(reduced_size,1.0); VectorType res(reduced_size,0.0); 
TSparseSpaceType::Mult (S_deflated,identity,res);
        KRATOS_WATCH(res)
        KRATOS_WATCH(norm_2(res))
    }

    // Solves the coarse ("deflated") system  W^T L W * lambda = W^T r
    // (or W^T L W * d_lambda = W^T w); in the first case output = W*lambda,
    // in the second output = W*d_lambda.
    //void SolveDeflatedPressure( VectorType& output, VectorType& r, SparseMatrixType& S_deflated, std::vector<int>& W, LUSkylineFactorization<TSparseSpaceType, TDenseSpaceType>& Factorization)
    void SolveDeflatedPressure( VectorType& output, VectorType& r, SparseMatrixType& S_deflated, std::vector<int>& W)
    {
        ///////////////////////////////////////////////////////////
        // put here deflation i.e. solve for WTLWp = WTr fixing w
        // W is the deflation matrix, stored in a vector format

        // extract the part of the residual corresponding to the pressure - r_p
        VectorType rp;
        // get the lower part of the residual vector, corresponding to pressure dofs
        GetPPart (r, rp);

        std::size_t reduced_size = S_deflated.size1();
        //std::size_t full_size = mS.size1();
        VectorType WT_rp(reduced_size), lambda(reduced_size);

        // WT_rp is the pressure residual projected onto the coarse space (W^T * rp)
        DeflationUtils::ApplyWtranspose(W, rp, WT_rp);

        // solve the small deflated system with the auxiliary linear solver
        mPred_solver->Solve(S_deflated, lambda, WT_rp);
        KRATOS_WATCH(norm_2(lambda) );

        // prolongate the coarse solution back to the full pressure space: output = W*lambda
        DeflationUtils::ApplyW(W, lambda, output);
        ///////////////////////////////////////////////////////////////////////////////////////////////
    }

    // Makes w orthogonal to the columns of W by subtracting its projection
    // onto the deflation space (only the pressure block of w is modified).
    void Modify_w( VectorType& w, std::vector<int>& W, std::size_t full_glob_size, std::size_t full_size, std::size_t reduced_size)
    {
        //std::size_t full_glob_size = A.size1();
        //std::size_t reduced_size = S_deflated.size1();
        //std::size_t full_size = mS.size1();
        VectorType wp (full_size);
        VectorType WT_wp(reduced_size);
        VectorType W_WT_wp (full_size);
        VectorType temp (full_glob_size);

        // extract the pressure block of w
        GetPPart(w,wp);

        // ModulusSquared[k] = squared norm of column k of W
        // (W^T * ones gives the number of entries per column, since W has 0/1 structure
        //  — NOTE(review): this assumes unit entries in W; confirm against DeflationUtils)
        VectorType ModulusSquared(reduced_size);
        VectorType identity(full_size,1.0);
        DeflationUtils::ApplyWtranspose(W, identity, ModulusSquared);

        DeflationUtils::ApplyWtranspose(W, wp, WT_wp);

        // scale down WT_wp with the squared modulus
        for(unsigned int i=0; i<reduced_size; i++)
        {
            WT_wp[i] /= ModulusSquared[i];
        }

        // wp <- wp - W (W^T W)^-1 W^T wp   (projection removed)
        DeflationUtils::ApplyW(W, WT_wp, W_WT_wp);
        wp-=W_WT_wp;

        // write the modified pressure block back into w
        WritePPart(w,wp);
    }

    // Extracts from a vector with the size of the overall r
    // the part that corresponds to u-dofs (rows listed in mother_indices).
    void GetUPart (const VectorType& rtot, VectorType& ru)
    {
        if (ru.size() != mother_indices.size() )
            ru.resize (mother_indices.size(), false);
        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(ru.size()); i++)
            ru[i] = rtot[mother_indices[i]];
    }

    // Extracts from a vector with the size of the overall r
    // the part that corresponds to p-dofs (rows listed in mpressure_indices).
    void GetPPart (const VectorType& rtot, VectorType& rp)
    {
        if (rp.size() != mpressure_indices.size() )
            rp.resize (mpressure_indices.size(), false);
        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(rp.size()); i++)
            rp[i] = rtot[mpressure_indices[i]];
    }

    // Scatters ru back into the u-dof rows of rtot.
    void WriteUPart (VectorType& rtot, const VectorType& ru)
    {
        #pragma omp parallel for
        for (int i = 0; i< static_cast<int>(ru.size()); i++)
            rtot[mother_indices[i]] = ru[i];
    }

    // Scatters rp back into the p-dof rows of rtot.
    void WritePPart (VectorType& rtot, const VectorType& rp)
    {
        #pragma omp parallel for
        for (int i = 0; i< static_cast<int>(rp.size()); i++)
            rtot[mpressure_indices[i]] = rp[i];
    }

    // NOTE(review): despite the name, this fills diagA[i] with the 2-norm of
    // row i of A (sqrt of the sum of squared row entries), not a row-sum lump.
    void ComputeDiagonalByLumping (SparseMatrixType& A,VectorType& diagA)
    {
        if (diagA.size() != A.size1() )
            diagA.resize (A.size1() );

        // get access to A data (CSR layout: index1 = row pointers, values = entries)
        const std::size_t* index1 = A.index1_data().begin();
        // const std::size_t* index2 = A.index2_data().begin();
        const double* values = A.value_data().begin();

        #pragma omp parallel for
        for (int i=0; i< static_cast<int>(A.size1()); i++)
        {
            unsigned int row_begin = index1[i];
            unsigned int row_end = index1[i+1];
            double temp = 0.0;
            for (unsigned int j=row_begin; j<row_end; j++)
                temp += values[j]*values[j];
            diagA[i] = sqrt(temp);
        }
    }

    // Sanity check of the CSR structure of A: warns about empty rows,
    // throws if a column index exceeds A.size2(), and returns the
    // Frobenius norm of A.
    double CheckMatrix (SparseMatrixType& A)
    {
        // get access to A data
        const std::size_t* index1 = A.index1_data().begin();
        const std::size_t* index2 = A.index2_data().begin();
        const double* values = A.value_data().begin();
        double norm = 0.0;
        for (unsigned int i=0; i<A.size1(); i++)
        {
            unsigned int row_begin = index1[i];
            unsigned int row_end = index1[i+1];
            if (row_end - row_begin == 0)
                std::cout << "line " << i << " has no elements" << std::endl;
            //KRATOS_THROW_ERROR(std::logic_error, "line found with no entries on line ",i)
            for (unsigned int j=row_begin; j<row_end; j++)
            {
                if (index2[j]>A.size2() )
                    KRATOS_THROW_ERROR (std::logic_error, "array above size of A","")
                norm += values[j]*values[j];
            }
        }
        return sqrt (norm);
    }

    /// Helper function for system matrix functions:
    /// in-place comb/bubble sort of the first NumCols entries of ColList
    /// into ascending order.
    void SortCols ( std::vector<unsigned int>& ColList, std::size_t& NumCols)
    {
        bool swap = true;
        unsigned int d = NumCols;
        int temp;
        while ( swap || d > 1 )
        {
            swap = false;
            d = (d+1) /2;
            for ( unsigned int i=0; i< (NumCols - d); i++)
                if ( ColList[i+d] < ColList[i] )
                {
                    temp = ColList[i+d];
                    ColList[i+d] = ColList[i];
                    ColList[i] = temp;
                    swap = true;
                }
        }
    }

    ///@}
    ///@name Private  Operations
    ///@{

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}

}; // Class DeflatedGMRESSolver

///@}

///@name Type Definitions
///@{

///@}

///@name Input and output
///@{

/// input stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
                                  DeflatedGMRESSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
    return IStream;
}

/// output stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
                                  const DeflatedGMRESSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
    rThis.PrintInfo (rOStream);
    rOStream << std::endl;
    rThis.PrintData (rOStream);
    return rOStream;
}
///@}

}  // namespace Kratos.

#endif // KRATOS_DEFLATED_GMRES_SOLVER_H_INCLUDED  defined
move_particle_utility.h
/* ============================================================================== KratosIncompressibleFluidApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: pbecker $ // Date: $Date: 2011-09-21 12:30:32 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_MOVE_PART_UTILITY_INCLUDED ) #define KRATOS_MOVE_PART_UTILITY_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" #include "spatial_containers/spatial_containers.h" #include "spatial_containers/cell.h" #include "spatial_containers/bins_dynamic_objects.h" #include "utilities/spatial_containers_configure.h" namespace Kratos { //this class is to be modified by the user to customize the interpolation process template< unsigned int TDim> class MoveParticleUtility { public: typedef SpatialContainersConfigure<TDim> Configure; typedef typename Configure::PointType PointType; //typedef PointType::CoordinatesArrayType CoordinatesArrayType; typedef typename Configure::ContainerType ContainerType; //typedef Configure::PointerType PointerType; typedef typename Configure::IteratorType IteratorType; typedef typename Configure::ResultContainerType ResultContainerType; //typedef Configure::ResultPointerType ResultPointerType; typedef typename Configure::ResultIteratorType ResultIteratorType; //typedef Configure::ContactPairType ContactPairType; //typedef Configure::ContainerContactType ContainerContactType; //typedef Configure::IteratorContactType IteratorContactType; //typedef Configure::PointerContactType PointerContactType; //typedef Configure::PointerTypeIterator PointerTypeIterator; KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtility); MoveParticleUtility(ModelPart& model_part , ModelPart& particle_model_part) : mr_model_part(model_part), mr_particle_model_part(particle_model_part) {} ~MoveParticleUtility() {} void MountBin() { KRATOS_TRY //copy the elements to a new container, as the list will //be shuffled 
duringthe construction of the tree ContainerType& rElements = mr_model_part.ElementsArray(); IteratorType it_begin = rElements.begin(); IteratorType it_end = rElements.end(); typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) ); paux.swap(mpBinsObjectDynamic); //BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end ); KRATOS_WATCH("inside MountBin") KRATOS_CATCH("") } void MoveParticles() { KRATOS_TRY array_1d<double,TDim+1> N; const int max_results = 1000; ResultContainerType results(max_results); double Dt= 0.1; double nsubsteps = 1.0; int nparticles = mr_particle_model_part.Nodes().size(); #pragma omp parallel for firstprivate(results,N,Dt,nsubsteps) for(int i=0; i<nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle=mr_particle_model_part.NodesBegin() + i; Node<3>::Pointer pparticle = *(iparticle.base()); ResultIteratorType result_begin = results.begin(); MoveParticle(VELOCITY,Dt,nsubsteps,pparticle,N,result_begin,max_results); } KRATOS_WATCH("inside Moveparticle") KRATOS_CATCH("") } protected: private: ///this function moves a particle according to the "velocity" given ///by "rVariable". 
The movement is performed in nsubsteps, during a total time ///of Dt void MoveParticle( Variable< array_1d<double,3> >& rVariable, double Dt, double nsubsteps, Node<3>::Pointer pparticle, array_1d<double,TDim+1>& N, ResultIteratorType result_begin, const unsigned int MaxNumberOfResults) { Element::Pointer pelement; double substep_dt = Dt/nsubsteps; array_1d<double,3> vel; for(unsigned int i=0; i<nsubsteps; i++) { bool is_found = FindNodeOnMesh( pparticle, N,pelement,result_begin,MaxNumberOfResults); KRATOS_WATCH(pparticle->Id()); KRATOS_WATCH(is_found); if(is_found == true) { KRATOS_WATCH(pelement->Id()); KRATOS_WATCH(N); KRATOS_WATCH("****"); Geometry< Node<3> >& geom = pelement->GetGeometry(); //get "velocity at position" noalias(vel) = geom[0].FastGetSolutionStepValue(rVariable); for(unsigned int j=0; j<TDim+1; j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(rVariable); } //do substep movement noalias(pparticle->Coordinates()) += substep_dt * vel; } } } ///this function should find the element into which a given node is located ///and return a pointer to the element and the vector containing the ///shape functions that define the postion within the element ///if "false" is devolved the element is not found bool FindNodeOnMesh( Node<3>::Pointer pparticle, array_1d<double,TDim+1>& N, Element::Pointer& pelement, ResultIteratorType result_begin, const unsigned int MaxNumberOfResults) { typedef std::size_t SizeType; //ask to the container for the list of candidate elements const array_1d<double,3>& coords = pparticle->Coordinates(); SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(coords, result_begin, MaxNumberOfResults ); if(results_found>0) { //loop over the candidate elements and check if the particle falls within for(SizeType i = 0; i< results_found; i++) { Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry(); //find local position bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if(is_found == true) { 
pelement = (*(result_begin+i)); return true; } } } //not found case return false; } //*************************************** //*************************************** inline bool CalculatePosition(Geometry<Node < 3 > >&geom, const double xc, const double yc, const double zc, array_1d<double, 3 > & N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double area = CalculateVol(x0, y0, x1, y1, x2, y2); double inv_area = 0.0; if (area == 0.0) { KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", ""); } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //*************************************** //*************************************** inline bool CalculatePosition(Geometry<Node < 3 > >&geom, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); double inv_vol = 0.0; if (vol < 0.0000000000001) { KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", ""); } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = 
CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } inline double CalculateVol(const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0)); } //*************************************** //*************************************** inline double CalculateVol(const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666667; } ModelPart& mr_model_part; ModelPart& mr_particle_model_part; typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; }; } // namespace Kratos. #endif // KRATOS_MOVE_PART_UTILITY_INCLUDED defined
GB_dense_ewise3_noaccum_template.c
//------------------------------------------------------------------------------ // GB_dense_ewise3_noaccum_template: C = A+B where all 3 matrices are dense //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB_unused.h" { //-------------------------------------------------------------------------- // get A, B, and C //-------------------------------------------------------------------------- // any matrix may be aliased to any other (C==A, C==B, and/or A==B) GB_ATYPE *Ax = (GB_ATYPE *) A->x ; GB_BTYPE *Bx = (GB_BTYPE *) B->x ; GB_CTYPE *Cx = (GB_CTYPE *) C->x ; const int64_t cnz = GB_NNZ (C) ; ASSERT (GB_is_dense (A)) ; ASSERT (GB_is_dense (B)) ; ASSERT (GB_is_dense (C)) ; int64_t p ; //-------------------------------------------------------------------------- // C = A+B where all 3 matrices are dense //-------------------------------------------------------------------------- #if GB_CTYPE_IS_BTYPE if (C == B) { //---------------------------------------------------------------------- // C = A+C where A and C are dense //---------------------------------------------------------------------- // C and B cannot be aliased if their types differ #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL // C += A via GB_cblas_saxpy or GB_cblas_daxpy GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Ax, Cx, nthreads) ; // C += A #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL // C -= A via GB_cblas_saxpy or GB_cblas_daxpy GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Ax, Cx, nthreads) ; // C -= A #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < cnz ; p++) { GB_GETA (aij, Ax, p) ; // aij = Ax [p] // Cx [p] = aij + Cx [p] GB_BINOP (GB_CX (p), aij, GB_CX (p), 0, 0) ; } #endif } else #endif #if GB_CTYPE_IS_ATYPE if (C == A) { 
//---------------------------------------------------------------------- // C = C+B where B and C are dense //---------------------------------------------------------------------- #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL // C += B via GB_cblas_saxpy or GB_cblas_daxpy GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ; // C += B #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL // C -= B via GB_cblas_saxpy or GB_cblas_daxpy GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ; // C -= B #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < cnz ; p++) { GB_GETB (bij, Bx, p) ; // bij = Bx [p] GB_BINOP (GB_CX (p), GB_CX (p), bij, 0, 0) ; // Cx [p] += bij } #endif } else #endif { //---------------------------------------------------------------------- // C = A+B where all 3 matrices are dense //---------------------------------------------------------------------- // note that A and B may still be aliased to each other #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL // C = A+B via GB_cblas_saxpy or GB_cblas_daxpy GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ; // C = A GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ; // C += B #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL // C = A-B via GB_cblas_saxpy or GB_cblas_daxpy GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ; // C = A GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ; // C -= B #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < cnz ; p++) { GB_GETA (aij, Ax, p) ; // aij = Ax [p] GB_GETB (bij, Bx, p) ; // bij = Bx [p] GB_BINOP (GB_CX (p), aij, bij, 0, 0) ; // Cx [p] = aij + bij } #endif } }
omp-sections.c
#include <stdio.h> #include <unistd.h> #include <omp.h> int main(int argc, char **argv) { int x = 0; #pragma omp parallel { printf("entered the parallel region\n"); #pragma omp sections { printf("entered the sections region\n"); #pragma omp section { sleep(1); printf("section 1, by thread %d\n", omp_get_thread_num()); } #pragma omp section { sleep(2); printf("section 2, by thread %d\n", omp_get_thread_num()); } #pragma omp section { sleep(3); printf("section 3, by thread %d\n", omp_get_thread_num()); } #pragma omp section { sleep(4); printf("section 4, by thread %d\n", omp_get_thread_num()); } #pragma omp section { sleep(5); printf("section 5, by thread %d\n", omp_get_thread_num()); } } } return 0; }
tree-pretty-print.c
/* Pretty formatting of GENERIC trees in C syntax. Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "output.h" #include "diagnostic.h" #include "real.h" #include "hashtab.h" #include "tree-flow.h" #include "langhooks.h" #include "tree-iterator.h" #include "tree-chrec.h" #include "tree-pass.h" #include "fixed-value.h" #include "value-prof.h" #include "predict.h" /* Local functions, macros and variables. */ static const char *op_symbol (const_tree); static void pretty_print_string (pretty_printer *, const char*); static void newline_and_indent (pretty_printer *, int); static void maybe_init_pretty_print (FILE *); static void print_struct_decl (pretty_printer *, const_tree, int, int); static void do_niy (pretty_printer *, const_tree); #define INDENT(SPACE) do { \ int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0) #define NIY do_niy(buffer,node) static pretty_printer buffer; static int initialized = 0; /* Try to print something for an unknown tree code. 
*/ static void do_niy (pretty_printer *buffer, const_tree node) { int i, len; pp_string (buffer, "<<< Unknown tree: "); pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]); if (EXPR_P (node)) { len = TREE_OPERAND_LENGTH (node); for (i = 0; i < len; ++i) { newline_and_indent (buffer, 2); dump_generic_node (buffer, TREE_OPERAND (node, i), 2, 0, false); } } pp_string (buffer, " >>>\n"); } /* Debugging function to print out a generic expression. */ void debug_generic_expr (tree t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a generic statement. */ void debug_generic_stmt (tree t) { print_generic_stmt (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a chain of trees . */ void debug_tree_chain (tree t) { struct pointer_set_t *seen = pointer_set_create (); while (t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID); fprintf (stderr, " "); t = TREE_CHAIN (t); if (pointer_set_insert (seen, t)) { fprintf (stderr, "... [cycled back to "); print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID); fprintf (stderr, "]"); break; } } fprintf (stderr, "\n"); pointer_set_destroy (seen); } /* Prints declaration DECL to the FILE with details specified by FLAGS. */ void print_generic_decl (FILE *file, tree decl, int flags) { maybe_init_pretty_print (file); print_declaration (&buffer, decl, 2, flags); pp_write_text_to_stream (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_stmt (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, true); pp_flush (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. The output is indented by INDENT spaces. 
*/ void print_generic_stmt_indented (FILE *file, tree t, int flags, int indent) { int i; maybe_init_pretty_print (file); for (i = 0; i < indent; i++) pp_space (&buffer); dump_generic_node (&buffer, t, indent, flags, true); pp_flush (&buffer); } /* Print a single expression T on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_expr (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, false); } /* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set in FLAGS. */ static void dump_decl_name (pretty_printer *buffer, tree node, int flags) { if (DECL_NAME (node)) { if ((flags & TDF_ASMNAME) && DECL_ASSEMBLER_NAME_SET_P (node)) pp_tree_identifier (buffer, DECL_ASSEMBLER_NAME (node)); else pp_tree_identifier (buffer, DECL_NAME (node)); } if ((flags & TDF_UID) || DECL_NAME (node) == NULL_TREE) { if (TREE_CODE (node) == LABEL_DECL && LABEL_DECL_UID (node) != -1) pp_printf (buffer, "L.%d", (int) LABEL_DECL_UID (node)); else if (TREE_CODE (node) == DEBUG_EXPR_DECL) { if (flags & TDF_NOUID) pp_string (buffer, "D#xxxx"); else pp_printf (buffer, "D#%i", DEBUG_TEMP_UID (node)); } else { char c = TREE_CODE (node) == CONST_DECL ? 'C' : 'D'; if (flags & TDF_NOUID) pp_printf (buffer, "%c.xxxx", c); else pp_printf (buffer, "%c.%u", c, DECL_UID (node)); } } } /* Like the above, but used for pretty printing function calls. */ static void dump_function_name (pretty_printer *buffer, tree node, int flags) { if (TREE_CODE (node) == NOP_EXPR) node = TREE_OPERAND (node, 0); if (DECL_NAME (node) && (flags & TDF_ASMNAME) == 0) pp_string (buffer, lang_hooks.decl_printable_name (node, 1)); else dump_decl_name (buffer, node, flags); } /* Dump a function declaration. NODE is the FUNCTION_TYPE. BUFFER, SPC and FLAGS are as in dump_generic_node. 
*/ static void dump_function_declaration (pretty_printer *buffer, tree node, int spc, int flags) { bool wrote_arg = false; tree arg; pp_space (buffer); pp_character (buffer, '('); /* Print the argument types. The last element in the list is a VOID_TYPE. The following avoids printing the last element. */ arg = TYPE_ARG_TYPES (node); while (arg && TREE_CHAIN (arg) && arg != error_mark_node) { wrote_arg = true; dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false); arg = TREE_CHAIN (arg); if (TREE_CHAIN (arg) && TREE_CODE (TREE_CHAIN (arg)) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } if (!wrote_arg) pp_string (buffer, "void"); pp_character (buffer, ')'); } /* Dump the domain associated with an array. */ static void dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags) { pp_character (buffer, '['); if (domain) { tree min = TYPE_MIN_VALUE (domain); tree max = TYPE_MAX_VALUE (domain); if (min && max && integer_zerop (min) && host_integerp (max, 0)) pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1); else { if (min) dump_generic_node (buffer, min, spc, flags, false); pp_character (buffer, ':'); if (max) dump_generic_node (buffer, max, spc, flags, false); } } else pp_string (buffer, "<unknown>"); pp_character (buffer, ']'); } /* Dump OpenMP clause CLAUSE. BUFFER, CLAUSE, SPC and FLAGS are as in dump_generic_node. 
*/ static void dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags) { const char *name; switch (OMP_CLAUSE_CODE (clause)) { case OMP_CLAUSE_PRIVATE: name = "private"; goto print_remap; case OMP_CLAUSE_SHARED: name = "shared"; goto print_remap; case OMP_CLAUSE_FIRSTPRIVATE: name = "firstprivate"; goto print_remap; case OMP_CLAUSE_LASTPRIVATE: name = "lastprivate"; goto print_remap; case OMP_CLAUSE_COPYIN: name = "copyin"; goto print_remap; case OMP_CLAUSE_COPYPRIVATE: name = "copyprivate"; goto print_remap; print_remap: pp_string (buffer, name); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_REDUCTION: pp_string (buffer, "reduction("); pp_string (buffer, op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause))); pp_character (buffer, ':'); dump_generic_node (buffer, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_IF: pp_string (buffer, "if("); dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_NUM_THREADS: pp_string (buffer, "num_threads("); dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_NOWAIT: pp_string (buffer, "nowait"); break; case OMP_CLAUSE_ORDERED: pp_string (buffer, "ordered"); break; case OMP_CLAUSE_DEFAULT: pp_string (buffer, "default("); switch (OMP_CLAUSE_DEFAULT_KIND (clause)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: pp_string (buffer, "shared"); break; case OMP_CLAUSE_DEFAULT_NONE: pp_string (buffer, "none"); break; case OMP_CLAUSE_DEFAULT_PRIVATE: pp_string (buffer, "private"); break; case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE: pp_string (buffer, "firstprivate"); break; default: gcc_unreachable (); } pp_character (buffer, ')'); break; case OMP_CLAUSE_SCHEDULE: pp_string (buffer, 
"schedule("); switch (OMP_CLAUSE_SCHEDULE_KIND (clause)) { case OMP_CLAUSE_SCHEDULE_STATIC: pp_string (buffer, "static"); break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: pp_string (buffer, "dynamic"); break; case OMP_CLAUSE_SCHEDULE_GUIDED: pp_string (buffer, "guided"); break; case OMP_CLAUSE_SCHEDULE_RUNTIME: pp_string (buffer, "runtime"); break; case OMP_CLAUSE_SCHEDULE_AUTO: pp_string (buffer, "auto"); break; default: gcc_unreachable (); } if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause)) { pp_character (buffer, ','); dump_generic_node (buffer, OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause), spc, flags, false); } pp_character (buffer, ')'); break; case OMP_CLAUSE_UNTIED: pp_string (buffer, "untied"); break; case OMP_CLAUSE_COLLAPSE: pp_string (buffer, "collapse("); dump_generic_node (buffer, OMP_CLAUSE_COLLAPSE_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; default: /* Should never happen. */ dump_generic_node (buffer, clause, spc, flags, false); break; } } /* Dump the list of OpenMP clauses. BUFFER, SPC and FLAGS are as in dump_generic_node. */ void dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags) { if (clause == NULL) return; pp_space (buffer); while (1) { dump_omp_clause (buffer, clause, spc, flags); clause = OMP_CLAUSE_CHAIN (clause); if (clause == NULL) return; pp_space (buffer); } } /* Dump location LOC to BUFFER. */ static void dump_location (pretty_printer *buffer, location_t loc) { expanded_location xloc = expand_location (loc); pp_character (buffer, '['); if (xloc.file) { pp_string (buffer, xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, xloc.line); pp_string (buffer, "] "); } /* Dump lexical block BLOCK. BUFFER, SPC and FLAGS are as in dump_generic_node. 
*/

/* Dump the lexical-scope node BLOCK to BUFFER at indentation SPC.
   FLAGS is a bitmask of TDF_* dump modifiers (see tree-pass.h).
   Prints a one-line header, then (unless TDF_SLIM) one indented line
   per non-empty chain hanging off the block.  */

static void
dump_block_node (pretty_printer *buffer, tree block, int spc, int flags)
{
  tree t;

  /* Header: the block's sequence number, optionally followed by its
     address and [abstract]/[written] markers.  */
  pp_printf (buffer, "BLOCK #%d ", BLOCK_NUMBER (block));

  if (flags & TDF_ADDRESS)
    pp_printf (buffer, "[%p] ", (void *) block);

  if (BLOCK_ABSTRACT (block))
    pp_string (buffer, "[abstract] ");

  if (TREE_ASM_WRITTEN (block))
    pp_string (buffer, "[written] ");

  /* In slim mode only the header is printed.  */
  if (flags & TDF_SLIM)
    return;

  if (BLOCK_SOURCE_LOCATION (block))
    dump_location (buffer, BLOCK_SOURCE_LOCATION (block));

  newline_and_indent (buffer, spc + 2);

  /* Related blocks are dumped with TDF_SLIM added so we print only
     their headers instead of recursing into full bodies.  */
  if (BLOCK_SUPERCONTEXT (block))
    {
      pp_string (buffer, "SUPERCONTEXT: ");
      dump_generic_node (buffer, BLOCK_SUPERCONTEXT (block), 0,
                         flags | TDF_SLIM, false);
      newline_and_indent (buffer, spc + 2);
    }

  if (BLOCK_SUBBLOCKS (block))
    {
      pp_string (buffer, "SUBBLOCKS: ");
      for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
        {
          dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
          pp_string (buffer, " ");
        }
      newline_and_indent (buffer, spc + 2);
    }

  if (BLOCK_CHAIN (block))
    {
      pp_string (buffer, "SIBLINGS: ");
      for (t = BLOCK_CHAIN (block); t; t = BLOCK_CHAIN (t))
        {
          dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
          pp_string (buffer, " ");
        }
      newline_and_indent (buffer, spc + 2);
    }

  if (BLOCK_VARS (block))
    {
      pp_string (buffer, "VARS: ");
      /* Variables are dumped with the caller's FLAGS, not slim.  */
      for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
        {
          dump_generic_node (buffer, t, 0, flags, false);
          pp_string (buffer, " ");
        }
      newline_and_indent (buffer, spc + 2);
    }

  if (VEC_length (tree, BLOCK_NONLOCALIZED_VARS (block)) > 0)
    {
      unsigned i;
      VEC(tree,gc) *nlv = BLOCK_NONLOCALIZED_VARS (block);

      pp_string (buffer, "NONLOCALIZED_VARS: ");
      for (i = 0; VEC_iterate (tree, nlv, i, t); i++)
        {
          dump_generic_node (buffer, t, 0, flags, false);
          pp_string (buffer, " ");
        }
      newline_and_indent (buffer, spc + 2);
    }

  if (BLOCK_ABSTRACT_ORIGIN (block))
    {
      pp_string (buffer, "ABSTRACT_ORIGIN: ");
      dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (block), 0,
                         flags | TDF_SLIM, false);
      newline_and_indent (buffer, spc + 2);
    }

  if (BLOCK_FRAGMENT_ORIGIN (block))
    {
      pp_string (buffer, "FRAGMENT_ORIGIN: ");
      dump_generic_node (buffer, BLOCK_FRAGMENT_ORIGIN (block), 0,
                         flags | TDF_SLIM, false);
      newline_and_indent (buffer, spc + 2);
    }

  if (BLOCK_FRAGMENT_CHAIN (block))
    {
      pp_string (buffer, "FRAGMENT_CHAIN: ");
      for (t = BLOCK_FRAGMENT_CHAIN (block); t; t = BLOCK_FRAGMENT_CHAIN (t))
        {
          dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
          pp_string (buffer, " ");
        }
      newline_and_indent (buffer, spc + 2);
    }
}


/* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   tree-pass.h).  If IS_STMT is true, the object printed is considered
   to be a statement and it is terminated by ';' if appropriate.  */

int
dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
                   bool is_stmt)
{
  tree type;
  tree op0, op1;
  const char *str;
  bool is_expr;

  if (node == NULL_TREE)
    return spc;

  is_expr = EXPR_P (node);

  /* Statement addresses are printed only on explicit request.  */
  if (is_stmt && (flags & TDF_STMTADDR))
    pp_printf (buffer, "<&%p> ", (void *)node);

  if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node))
    dump_location (buffer, EXPR_LOCATION (node));

  switch (TREE_CODE (node))
    {
    case ERROR_MARK:
      pp_string (buffer, "<<< error >>>");
      break;

    case IDENTIFIER_NODE:
      pp_tree_identifier (buffer, node);
      break;

    case TREE_LIST:
      while (node && node != error_mark_node)
        {
          if (TREE_PURPOSE (node))
            {
              dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags,
                                 false);
              pp_space (buffer);
            }
          dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false);
          node = TREE_CHAIN (node);
          /* Separate successive list elements with ", ".  */
          if (node && TREE_CODE (node) == TREE_LIST)
            {
              pp_character (buffer, ',');
              pp_space (buffer);
            }
        }
      break;

    case TREE_BINFO:
      dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false);
      break;

    case TREE_VEC:
      {
        size_t i;
        if (TREE_VEC_LENGTH (node) > 0)
          {
            size_t len = TREE_VEC_LENGTH (node);
            /* All but the last element, each followed by ", ".  */
            for (i = 0; i < len - 1; i++)
              {
                dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc,
                                   flags, false);
                pp_character (buffer, ',');
                pp_space (buffer);
              }
            dump_generic_node (buffer,
TREE_VEC_ELT (node, len - 1), spc, flags, false); } } break; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case FIXED_POINT_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: { unsigned int quals = TYPE_QUALS (node); enum tree_code_class tclass; if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); else if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, "restrict "); if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node))) { pp_string (buffer, "<address-space-"); pp_decimal_int (buffer, TYPE_ADDR_SPACE (node)); pp_string (buffer, "> "); } tclass = TREE_CODE_CLASS (TREE_CODE (node)); if (tclass == tcc_declaration) { if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else pp_string (buffer, "<unnamed type decl>"); } else if (tclass == tcc_type) { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) pp_tree_identifier (buffer, TYPE_NAME (node)); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_string (buffer, "<unnamed type>"); } else if (TREE_CODE (node) == VECTOR_TYPE) { pp_string (buffer, "vector "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == INTEGER_TYPE) { pp_string (buffer, (TYPE_UNSIGNED (node) ? "<unnamed-unsigned:" : "<unnamed-signed:")); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else if (TREE_CODE (node) == COMPLEX_TYPE) { pp_string (buffer, "__complex__ "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == REAL_TYPE) { pp_string (buffer, "<float:"); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else if (TREE_CODE (node) == FIXED_POINT_TYPE) { pp_string (buffer, "<fixed-point-"); pp_string (buffer, TYPE_SATURATING (node) ? 
"sat:" : "nonsat:"); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else pp_string (buffer, "<unnamed type>"); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&"); if (TREE_TYPE (node) == NULL) { pp_string (buffer, str); pp_string (buffer, "<null type>"); } else if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE (node); dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false); pp_space (buffer); pp_character (buffer, '('); pp_string (buffer, str); if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); pp_character (buffer, ')'); dump_function_declaration (buffer, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS (node); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); pp_string (buffer, str); if (quals & TYPE_QUAL_CONST) pp_string (buffer, " const"); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, " volatile"); if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, " restrict"); if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node))) { pp_string (buffer, " <address-space-"); pp_decimal_int (buffer, TYPE_ADDR_SPACE (node)); pp_string (buffer, ">"); } if (TYPE_REF_CAN_ALIAS_ALL (node)) pp_string (buffer, " {ref-all}"); } break; case OFFSET_TYPE: NIY; break; case TARGET_MEM_REF: { const char *sep = ""; tree tmp; pp_string (buffer, "MEM["); tmp = TMR_SYMBOL (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "symbol: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_BASE (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "base: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_INDEX (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "index: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_STEP 
(node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "step: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_OFFSET (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "offset: "); dump_generic_node (buffer, tmp, spc, flags, false); } pp_string (buffer, "]"); if (flags & TDF_DETAILS) { pp_string (buffer, "{"); dump_generic_node (buffer, TMR_ORIGINAL (node), spc, flags, false); pp_string (buffer, "}"); } } break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. */ for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) ; dump_generic_node (buffer, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { unsigned int quals = TYPE_QUALS (node); if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); /* Print the name of the structure. */ if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if (TREE_CODE (node) == UNION_TYPE) pp_string (buffer, "union "); if (TYPE_NAME (node)) dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false); else if (!(flags & TDF_SLIM)) /* FIXME: If we eliminate the 'else' above and attempt to show the fields for named types, we may get stuck following a cycle of pointers to structs. The alleged self-reference check in print_struct_decl will not detect cycles involving more than one pointer or struct type. */ print_struct_decl (buffer, node, spc, flags); break; } case LANG_TYPE: NIY; break; case INTEGER_CST: if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE) { /* In the case of a pointer, one may want to divide by the size of the pointed-to type. Unfortunately, this not straightforward. 
The C front-end maps expressions (int *) 5 int *p; (p + 5) in such a way that the two INTEGER_CST nodes for "5" have different values but identical types. In the latter case, the 5 is multiplied by sizeof (int) in c-common.c (pointer_int_sum) to convert it to a byte address, and yet the type of the node is left unchanged. Argh. What is consistent though is that the number value corresponds to bytes (UNITS) offset. NB: Neither of the following divisors can be trivially used to recover the original literal: TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); pp_string (buffer, "B"); /* pseudo-unit */ } else if (! host_integerp (node, 0)) { tree val = node; unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val); HOST_WIDE_INT high = TREE_INT_CST_HIGH (val); if (tree_int_cst_sgn (val) < 0) { pp_character (buffer, '-'); high = ~high + !low; low = -low; } /* Would "%x%0*x" or "%x%*0x" get zero-padding on all systems? */ sprintf (pp_buffer (buffer)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, (unsigned HOST_WIDE_INT) high, low); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } else pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); break; case REAL_CST: /* Code copied from print_node. */ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) pp_string (buffer, " overflow"); #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC) d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) pp_string (buffer, REAL_VALUE_NEGATIVE (d) ? 
" -Inf" : " Inf"); else if (REAL_VALUE_ISNAN (d)) pp_string (buffer, " Nan"); else { char string[100]; real_to_decimal (string, &d, sizeof (string), 0, 1); pp_string (buffer, string); } #else { HOST_WIDE_INT i; unsigned char *p = (unsigned char *) &TREE_REAL_CST (node); pp_string (buffer, "0x"); for (i = 0; i < sizeof TREE_REAL_CST (node); i++) output_formatted_integer (buffer, "%02x", *p++); } #endif break; } case FIXED_CST: { char string[100]; fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string)); pp_string (buffer, string); break; } case COMPLEX_CST: pp_string (buffer, "__complex__ ("); dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false); pp_string (buffer, ")"); break; case STRING_CST: pp_string (buffer, "\""); pretty_print_string (buffer, TREE_STRING_POINTER (node)); pp_string (buffer, "\""); break; case VECTOR_CST: { tree elt; pp_string (buffer, "{ "); for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt)) { dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false); if (TREE_CHAIN (elt)) pp_string (buffer, ", "); } pp_string (buffer, " }"); } break; case FUNCTION_TYPE: case METHOD_TYPE: dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); if (TREE_CODE (node) == METHOD_TYPE) { if (TYPE_METHOD_BASETYPE (node)) dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), flags); else pp_string (buffer, "<null method basetype>"); pp_string (buffer, "::"); } if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); dump_function_declaration (buffer, node, spc, flags); break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name (buffer, node, flags); break; case LABEL_DECL: if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else if (LABEL_DECL_UID (node) != -1) pp_printf (buffer, 
"<L%d>", (int) LABEL_DECL_UID (node)); else { if (flags & TDF_NOUID) pp_string (buffer, "<D.xxxx>"); else pp_printf (buffer, "<D.%u>", DECL_UID (node)); } break; case TYPE_DECL: if (DECL_IS_BUILTIN (node)) { /* Don't print the declaration of built-in types. */ break; } if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else { if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) && TYPE_METHODS (TREE_TYPE (node))) { /* The type is a c++ class: all structures have at least 4 methods. */ pp_string (buffer, "class "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else { pp_string (buffer, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE ? "union" : "struct ")); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } } break; case VAR_DECL: case PARM_DECL: case FIELD_DECL: case DEBUG_EXPR_DECL: case NAMESPACE_DECL: dump_decl_name (buffer, node, flags); break; case RESULT_DECL: pp_string (buffer, "<retval>"); break; case COMPONENT_REF: op0 = TREE_OPERAND (node, 0); str = "."; if (op0 && TREE_CODE (op0) == INDIRECT_REF) { op0 = TREE_OPERAND (op0, 0); str = "->"; } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_string (buffer, str); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); op0 = component_ref_field_offset (node); if (op0 && TREE_CODE (op0) != INTEGER_CST) { pp_string (buffer, "{off: "); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, '}'); } break; case BIT_FIELD_REF: pp_string (buffer, "BIT_FIELD_REF <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case 
ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND (node, 0); if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_character (buffer, '['); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (node) == ARRAY_RANGE_REF) pp_string (buffer, " ..."); pp_character (buffer, ']'); op0 = array_ref_low_bound (node); op1 = array_ref_element_size (node); if (!integer_zerop (op0) || TREE_OPERAND (node, 2) || TREE_OPERAND (node, 3)) { pp_string (buffer, "{lb: "); dump_generic_node (buffer, op0, spc, flags, false); pp_string (buffer, " sz: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, '}'); } break; case CONSTRUCTOR: { unsigned HOST_WIDE_INT ix; tree field, val; bool is_struct_init = FALSE; pp_character (buffer, '{'); if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) is_struct_init = TRUE; FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) { if (field && is_struct_init) { pp_character (buffer, '.'); dump_generic_node (buffer, field, spc, flags, false); pp_string (buffer, "="); } if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); if (val && TREE_CODE (val) == FUNCTION_DECL) dump_decl_name (buffer, val, flags); else dump_generic_node (buffer, val, spc, flags, false); if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1) { pp_character (buffer, ','); pp_space (buffer); } } pp_character (buffer, '}'); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string (buffer, "<COMPOUND_EXPR>"); break; } dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } for (tp = &TREE_OPERAND (node, 1); 
TREE_CODE (*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND (*tp, 1)) { dump_generic_node (buffer, TREE_OPERAND (*tp, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } } dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM)); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if (flags & TDF_SLIM) { pp_string (buffer, "<STATEMENT_LIST>"); break; } for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si)) { if (!first) newline_and_indent (buffer, spc); else first = false; dump_generic_node (buffer, tsi_stmt (si), spc, flags, true); } } break; case MODIFY_EXPR: case INIT_EXPR: dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); if (TREE_CODE (node) == MODIFY_EXPR && MOVE_NONTEMPORAL (node)) pp_string (buffer, "{nt}"); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string (buffer, "TARGET_EXPR <"); dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false); pp_character (buffer, '>'); break; case DECL_EXPR: print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node) { pp_string (buffer, "if ("); dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false); pp_character (buffer, ')'); /* The lowered cond_exprs should always be printed in full. 
*/ if (COND_EXPR_THEN (node) && (IS_EMPTY_STMT (COND_EXPR_THEN (node)) || TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR) && COND_EXPR_ELSE (node) && (IS_EMPTY_STMT (COND_EXPR_ELSE (node)) || TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR)) { pp_space (buffer); dump_generic_node (buffer, COND_EXPR_THEN (node), 0, flags, true); if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { pp_string (buffer, " else "); dump_generic_node (buffer, COND_EXPR_ELSE (node), 0, flags, true); } } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. */ if (COND_EXPR_THEN (node)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } /* Output COND_EXPR_ELSE. */ if (COND_EXPR_ELSE (node) && !IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { newline_and_indent (buffer, spc); pp_string (buffer, "else"); newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } } is_expr = false; } else { dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '?'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_space (buffer); pp_character (buffer, ':'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); } break; case BIND_EXPR: pp_character (buffer, '{'); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS (node)) { pp_newline (buffer); for (op0 = BIND_EXPR_VARS (node); op0; op0 = TREE_CHAIN (op0)) { print_declaration (buffer, op0, spc+2, flags); pp_newline (buffer); } } newline_and_indent (buffer, spc+2); dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true); newline_and_indent (buffer, spc); pp_character 
(buffer, '}'); } is_expr = false; break; case CALL_EXPR: print_call_name (buffer, CALL_EXPR_FN (node), flags); /* Print parameters. */ pp_space (buffer); pp_character (buffer, '('); { tree arg; call_expr_arg_iterator iter; FOR_EACH_CALL_EXPR_ARG (arg, iter, node) { dump_generic_node (buffer, arg, spc, flags, false); if (more_call_expr_args_p (&iter)) { pp_character (buffer, ','); pp_space (buffer); } } } if (CALL_EXPR_VA_ARG_PACK (node)) { if (call_expr_nargs (node) > 0) { pp_character (buffer, ','); pp_space (buffer); } pp_string (buffer, "__builtin_va_arg_pack ()"); } pp_character (buffer, ')'); op1 = CALL_EXPR_STATIC_CHAIN (node); if (op1) { pp_string (buffer, " [static-chain: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ']'); } if (CALL_EXPR_RETURN_SLOT_OPT (node)) pp_string (buffer, " [return slot optimization]"); if (CALL_EXPR_TAILCALL (node)) pp_string (buffer, " [tail call]"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string (buffer, "<<cleanup_point "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">>"); break; case PLACEHOLDER_EXPR: pp_string (buffer, "<PLACEHOLDER_EXPR "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_character (buffer, '>'); break; /* Binary arithmetic and logic expressions. 
*/ case WIDEN_SUM_EXPR: case WIDEN_MULT_EXPR: case MULT_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol (node); op0 = TREE_OPERAND (node, 0); op1 = TREE_OPERAND (node, 1); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op0) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op0, spc, flags, false); pp_space (buffer); pp_string (buffer, op); pp_space (buffer); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op1) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. 
*/ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: if (TREE_CODE (node) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST || TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function pointers. */ else pp_string (buffer, op_symbol (node)); if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); if (TREE_CODE (node) == MISALIGNED_INDIRECT_REF) { pp_string (buffer, "{misalignment: "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '}'); } break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, op_symbol (node)); break; case MIN_EXPR: pp_string (buffer, "MIN_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case MAX_EXPR: pp_string (buffer, "MAX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case ABS_EXPR: pp_string (buffer, "ABS_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case RANGE_EXPR: NIY; break; case ADDR_SPACE_CONVERT_EXPR: case 
FIXED_CONVERT_EXPR: case FIX_TRUNC_EXPR: case FLOAT_EXPR: CASE_CONVERT: type = TREE_TYPE (node); op0 = TREE_OPERAND (node, 0); if (type != TREE_TYPE (op0)) { pp_character (buffer, '('); dump_generic_node (buffer, type, spc, flags, false); pp_string (buffer, ") "); } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); break; case VIEW_CONVERT_EXPR: pp_string (buffer, "VIEW_CONVERT_EXPR<"); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_string (buffer, ">("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case PAREN_EXPR: pp_string (buffer, "(("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, "))"); break; case NON_LVALUE_EXPR: pp_string (buffer, "NON_LVALUE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case SAVE_EXPR: pp_string (buffer, "SAVE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case COMPLEX_EXPR: pp_string (buffer, "COMPLEX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case CONJ_EXPR: pp_string (buffer, "CONJ_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case REALPART_EXPR: pp_string (buffer, "REALPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case IMAGPART_EXPR: pp_string (buffer, "IMAGPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case VA_ARG_EXPR: pp_string (buffer, "VA_ARG_EXPR <"); dump_generic_node 
(buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: pp_string (buffer, "try"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); newline_and_indent (buffer, spc); pp_string (buffer, (TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CATCH_EXPR: pp_string (buffer, "catch ("); dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false); pp_string (buffer, ")"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case EH_FILTER_EXPR: pp_string (buffer, "<<<eh_filter ("); dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false); pp_string (buffer, ")>>>"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case LABEL_EXPR: op0 = TREE_OPERAND (node, 0); /* If this is for break or continue, don't bother printing it. 
*/ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) break; } dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ':'); if (DECL_NONLOCAL (op0)) pp_string (buffer, " [non-local]"); break; case LOOP_EXPR: pp_string (buffer, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case PREDICT_EXPR: pp_string (buffer, "// predicted "); if (PREDICT_EXPR_OUTCOME (node)) pp_string (buffer, "likely by "); else pp_string (buffer, "unlikely by "); pp_string (buffer, predictor_name (PREDICT_EXPR_PREDICTOR (node))); pp_string (buffer, " predictor."); break; case RETURN_EXPR: pp_string (buffer, "return"); op0 = TREE_OPERAND (node, 0); if (op0) { pp_space (buffer); if (TREE_CODE (op0) == MODIFY_EXPR) dump_generic_node (buffer, TREE_OPERAND (op0, 1), spc, flags, false); else dump_generic_node (buffer, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string (buffer, "if ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ") break"); break; case SWITCH_EXPR: pp_string (buffer, "switch ("); dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false); pp_character (buffer, ')'); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); if (SWITCH_BODY (node)) { newline_and_indent (buffer, spc+4); dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags, true); } else { tree vec = SWITCH_LABELS (node); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); newline_and_indent (buffer, spc+4); if (elt) { dump_generic_node (buffer, elt, spc+4, flags, false); pp_string (buffer, " goto "); 
dump_generic_node (buffer, CASE_LABEL (elt), spc+4, flags, true); pp_semicolon (buffer); } else pp_string (buffer, "case ???: goto ???;"); } } newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION (node); if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (buffer, name); break; } } pp_string (buffer, "goto "); dump_generic_node (buffer, op0, spc, flags, false); break; case ASM_EXPR: pp_string (buffer, "__asm__"); if (ASM_VOLATILE_P (node)) pp_string (buffer, " __volatile__"); pp_character (buffer, '('); dump_generic_node (buffer, ASM_STRING (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false); if (ASM_CLOBBERS (node)) { pp_character (buffer, ':'); dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false); } pp_string (buffer, ")"); break; case CASE_LABEL_EXPR: if (CASE_LOW (node) && CASE_HIGH (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); pp_string (buffer, " ... 
"); dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false); } else if (CASE_LOW (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); } else pp_string (buffer, "default"); pp_character (buffer, ':'); break; case OBJ_TYPE_REF: pp_string (buffer, "OBJ_TYPE_REF("); dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false); pp_character (buffer, ';'); dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false); pp_character (buffer, '-'); pp_character (buffer, '>'); dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false); pp_character (buffer, ')'); break; case SSA_NAME: dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false); pp_string (buffer, "_"); pp_decimal_int (buffer, SSA_NAME_VERSION (node)); if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node)) pp_string (buffer, "(ab)"); else if (SSA_NAME_IS_DEFAULT_DEF (node)) pp_string (buffer, "(D)"); break; case WITH_SIZE_EXPR: pp_string (buffer, "WITH_SIZE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case ASSERT_EXPR: pp_string (buffer, "ASSERT_EXPR <"); dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false); pp_string (buffer, ">"); break; case SCEV_KNOWN: pp_string (buffer, "scev_known"); break; case SCEV_NOT_KNOWN: pp_string (buffer, "scev_not_known"); break; case POLYNOMIAL_CHREC: pp_string (buffer, "{"); dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false); pp_string (buffer, ", +, "); dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false); pp_string (buffer, "}_"); dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false); is_stmt = false; break; case REALIGN_LOAD_EXPR: pp_string (buffer, "REALIGN_LOAD <"); 
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case VEC_COND_EXPR: pp_string (buffer, " VEC_COND_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case DOT_PROD_EXPR: pp_string (buffer, " DOT_PROD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case OMP_PARALLEL: pp_string (buffer, "#pragma omp parallel"); dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags); dump_omp_body: if (!(flags & TDF_SLIM) && OMP_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } is_expr = false; break; case OMP_TASK: pp_string (buffer, "#pragma omp task"); dump_omp_clauses (buffer, OMP_TASK_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_FOR: pp_string (buffer, "#pragma omp for"); dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags); if (!(flags & TDF_SLIM)) { int i; if (OMP_FOR_PRE_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); spc += 4; newline_and_indent (buffer, spc); dump_generic_node (buffer, OMP_FOR_PRE_BODY (node), spc, flags, false); } spc -= 2; for (i = 
0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (node)); i++) { spc += 2; newline_and_indent (buffer, spc); pp_string (buffer, "for ("); dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INIT (node), i), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_COND (node), i), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INCR (node), i), spc, flags, false); pp_string (buffer, ")"); } if (OMP_FOR_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } spc -= 2 * TREE_VEC_LENGTH (OMP_FOR_INIT (node)) - 2; if (OMP_FOR_PRE_BODY (node)) { spc -= 4; newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } } is_expr = false; break; case OMP_SECTIONS: pp_string (buffer, "#pragma omp sections"); dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_SECTION: pp_string (buffer, "#pragma omp section"); goto dump_omp_body; case OMP_MASTER: pp_string (buffer, "#pragma omp master"); goto dump_omp_body; case OMP_ORDERED: pp_string (buffer, "#pragma omp ordered"); goto dump_omp_body; case OMP_CRITICAL: pp_string (buffer, "#pragma omp critical"); if (OMP_CRITICAL_NAME (node)) { pp_space (buffer); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc, flags, false); pp_character (buffer, ')'); } goto dump_omp_body; case OMP_ATOMIC: pp_string (buffer, "#pragma omp atomic"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_SINGLE: pp_string (buffer, "#pragma omp single"); dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES 
(node), spc, flags); goto dump_omp_body; case OMP_CLAUSE: dump_omp_clause (buffer, node, spc, flags); is_expr = false; break; case REDUC_MAX_EXPR: pp_string (buffer, " REDUC_MAX_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_MIN_EXPR: pp_string (buffer, " REDUC_MIN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_PLUS_EXPR: pp_string (buffer, " REDUC_PLUS_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_HI_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_LO_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_HI_EXPR: pp_string (buffer, " VEC_UNPACK_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_LO_EXPR: pp_string (buffer, " VEC_UNPACK_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_HI_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_LO_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_TRUNC_EXPR < "); 
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_SAT_EXPR: pp_string (buffer, " VEC_PACK_SAT_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_FIX_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_FIX_TRUNC_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case BLOCK: dump_block_node (buffer, node, spc, flags); break; case VEC_EXTRACT_EVEN_EXPR: pp_string (buffer, " VEC_EXTRACT_EVEN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_EXTRACT_ODD_EXPR: pp_string (buffer, " VEC_EXTRACT_ODD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_INTERLEAVE_HIGH_EXPR: pp_string (buffer, " VEC_INTERLEAVE_HIGH_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_INTERLEAVE_LOW_EXPR: pp_string (buffer, " VEC_INTERLEAVE_LOW_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; default: NIY; } if (is_stmt && is_expr) pp_semicolon 
(buffer); /* If we're building a diagnostic, the formatted text will be written into BUFFER's stream by the caller; otherwise, write it now. */ if (!(flags & TDF_DIAGNOSTIC)) pp_write_text_to_stream (buffer); return spc; } /* Print the declaration of a variable. */ void print_declaration (pretty_printer *buffer, tree t, int spc, int flags) { INDENT (spc); if (TREE_CODE (t) == TYPE_DECL) pp_string (buffer, "typedef "); if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t)) pp_string (buffer, "register "); if (TREE_PUBLIC (t) && DECL_EXTERNAL (t)) pp_string (buffer, "extern "); else if (TREE_STATIC (t)) pp_string (buffer, "static "); /* Print the type and name. */ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { tree tmp; /* Print array's type. */ tmp = TREE_TYPE (t); while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE) tmp = TREE_TYPE (tmp); dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); /* Print the dimensions. */ tmp = TREE_TYPE (t); while (TREE_CODE (tmp) == ARRAY_TYPE) { dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); tmp = TREE_TYPE (tmp); } } else if (TREE_CODE (t) == FUNCTION_DECL) { dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false); pp_space (buffer); dump_decl_name (buffer, t, flags); dump_function_declaration (buffer, TREE_TYPE (t), spc, flags); } else { /* Print type declaration. */ dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); } if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) { pp_string (buffer, " __asm__ "); pp_character (buffer, '('); dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false); pp_character (buffer, ')'); } /* The initial value of a function serves to determine whether the function is declared or defined. 
So the following does not apply to function nodes. */
  if (TREE_CODE (t) != FUNCTION_DECL)
    {
      /* Print the initial value. */
      if (DECL_INITIAL (t))
        {
          pp_space (buffer);
          pp_character (buffer, '=');
          pp_space (buffer);
          dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false);
        }
    }

  /* A VAR_DECL carrying a DECL_VALUE_EXPR stands for another expression;
     make that mapping visible in the dump. */
  if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
    {
      pp_string (buffer, " [value-expr: ");
      dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false);
      pp_character (buffer, ']');
    }

  pp_character (buffer, ';');
}

/* Prints a structure: name, fields, and methods.
   FIXME: Still incomplete. */

static void
print_struct_decl (pretty_printer *buffer, const_tree node, int spc, int flags)
{
  /* Print the name of the structure. */
  if (TYPE_NAME (node))
    {
      INDENT (spc);
      if (TREE_CODE (node) == RECORD_TYPE)
        pp_string (buffer, "struct ");
      else if ((TREE_CODE (node) == UNION_TYPE
                || TREE_CODE (node) == QUAL_UNION_TYPE))
        pp_string (buffer, "union ");

      dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false);
    }

  /* Print the contents of the structure. */
  pp_newline (buffer);
  INDENT (spc);
  pp_character (buffer, '{');
  pp_newline (buffer);

  /* Print the fields of the structure.  Fields whose type is the structure
     itself (directly or via one level of pointer) are skipped to avoid
     unbounded recursion through print_declaration. */
  {
    tree tmp;
    tmp = TYPE_FIELDS (node);
    while (tmp)
      {
        /* Avoid to print recursively the structure. */
        /* FIXME : Not implemented correctly...,
           what about the case when we have a cycle in the contain graph? ...
           Maybe this could be solved by looking at the scope in which the
           structure was declared. */
        if (TREE_TYPE (tmp) != node
            && (TREE_CODE (TREE_TYPE (tmp)) != POINTER_TYPE
                || TREE_TYPE (TREE_TYPE (tmp)) != node))
          {
            print_declaration (buffer, tmp, spc+2, flags);
            pp_newline (buffer);
          }
        tmp = TREE_CHAIN (tmp);
      }
  }
  INDENT (spc);
  pp_character (buffer, '}');
}

/* Return the priority of the operator CODE.
From lowest to highest precedence with either left-to-right (L-R) or right-to-left (R-L) associativity]: 1 [L-R] , 2 [R-L] = += -= *= /= %= &= ^= |= <<= >>= 3 [R-L] ?: 4 [L-R] || 5 [L-R] && 6 [L-R] | 7 [L-R] ^ 8 [L-R] & 9 [L-R] == != 10 [L-R] < <= > >= 11 [L-R] << >> 12 [L-R] + - 13 [L-R] * / % 14 [R-L] ! ~ ++ -- + - * & (type) sizeof 15 [L-R] fn() [] -> . unary +, - and * have higher precedence than the corresponding binary operators. */ int op_code_prio (enum tree_code code) { switch (code) { case TREE_LIST: case COMPOUND_EXPR: case BIND_EXPR: return 1; case MODIFY_EXPR: case INIT_EXPR: return 2; case COND_EXPR: return 3; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return 4; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return 5; case BIT_IOR_EXPR: return 6; case BIT_XOR_EXPR: case TRUTH_XOR_EXPR: return 7; case BIT_AND_EXPR: return 8; case EQ_EXPR: case NE_EXPR: return 9; case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: return 10; case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: return 11; case WIDEN_SUM_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: return 12; case VEC_WIDEN_MULT_HI_EXPR: case VEC_WIDEN_MULT_LO_EXPR: case WIDEN_MULT_EXPR: case DOT_PROD_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: return 13; case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case NEGATE_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: case ADDR_EXPR: case FLOAT_EXPR: CASE_CONVERT: case FIX_TRUNC_EXPR: case TARGET_EXPR: return 14; case CALL_EXPR: case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: return 
15; /* Special expressions. */ case MIN_EXPR: case MAX_EXPR: case ABS_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case REDUC_MAX_EXPR: case REDUC_MIN_EXPR: case REDUC_PLUS_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case VEC_UNPACK_HI_EXPR: case VEC_UNPACK_LO_EXPR: case VEC_UNPACK_FLOAT_HI_EXPR: case VEC_UNPACK_FLOAT_LO_EXPR: case VEC_PACK_TRUNC_EXPR: case VEC_PACK_SAT_EXPR: return 16; default: /* Return an arbitrarily high precedence to avoid surrounding single VAR_DECLs in ()s. */ return 9999; } } /* Return the priority of the operator OP. */ int op_prio (const_tree op) { enum tree_code code; if (op == NULL) return 9999; code = TREE_CODE (op); if (code == SAVE_EXPR || code == NON_LVALUE_EXPR) return op_prio (TREE_OPERAND (op, 0)); return op_code_prio (code); } /* Return the symbol associated with operator CODE. */ const char * op_symbol_code (enum tree_code code) { switch (code) { case MODIFY_EXPR: return "="; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return "||"; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return "&&"; case BIT_IOR_EXPR: return "|"; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: return "^"; case ADDR_EXPR: case BIT_AND_EXPR: return "&"; case ORDERED_EXPR: return "ord"; case UNORDERED_EXPR: return "unord"; case EQ_EXPR: return "=="; case UNEQ_EXPR: return "u=="; case NE_EXPR: return "!="; case LT_EXPR: return "<"; case UNLT_EXPR: return "u<"; case LE_EXPR: return "<="; case UNLE_EXPR: return "u<="; case GT_EXPR: return ">"; case UNGT_EXPR: return "u>"; case GE_EXPR: return ">="; case UNGE_EXPR: return "u>="; case LTGT_EXPR: return "<>"; case LSHIFT_EXPR: return "<<"; case RSHIFT_EXPR: return ">>"; case LROTATE_EXPR: return "r<<"; case RROTATE_EXPR: return "r>>"; case VEC_LSHIFT_EXPR: return "v<<"; case VEC_RSHIFT_EXPR: return "v>>"; case POINTER_PLUS_EXPR: return "+"; case PLUS_EXPR: return "+"; case REDUC_PLUS_EXPR: return "r+"; case WIDEN_SUM_EXPR: return "w+"; case WIDEN_MULT_EXPR: return "w*"; case NEGATE_EXPR: case MINUS_EXPR: return "-"; 
case BIT_NOT_EXPR: return "~"; case TRUTH_NOT_EXPR: return "!"; case MULT_EXPR: case INDIRECT_REF: return "*"; case ALIGN_INDIRECT_REF: return "A*"; case MISALIGNED_INDIRECT_REF: return "M*"; case TRUNC_DIV_EXPR: case RDIV_EXPR: return "/"; case CEIL_DIV_EXPR: return "/[cl]"; case FLOOR_DIV_EXPR: return "/[fl]"; case ROUND_DIV_EXPR: return "/[rd]"; case EXACT_DIV_EXPR: return "/[ex]"; case TRUNC_MOD_EXPR: return "%"; case CEIL_MOD_EXPR: return "%[cl]"; case FLOOR_MOD_EXPR: return "%[fl]"; case ROUND_MOD_EXPR: return "%[rd]"; case PREDECREMENT_EXPR: return " --"; case PREINCREMENT_EXPR: return " ++"; case POSTDECREMENT_EXPR: return "-- "; case POSTINCREMENT_EXPR: return "++ "; case MAX_EXPR: return "max"; case MIN_EXPR: return "min"; default: return "<<< ??? >>>"; } } /* Return the symbol associated with operator OP. */ static const char * op_symbol (const_tree op) { return op_symbol_code (TREE_CODE (op)); } /* Prints the name of a call. NODE is the CALL_EXPR_FN of a CALL_EXPR or the gimple_call_fn of a GIMPLE_CALL. */ void print_call_name (pretty_printer *buffer, tree node, int flags) { tree op0 = node; if (TREE_CODE (op0) == NON_LVALUE_EXPR) op0 = TREE_OPERAND (op0, 0); again: switch (TREE_CODE (op0)) { case VAR_DECL: case PARM_DECL: case FUNCTION_DECL: dump_function_name (buffer, op0, flags); break; case ADDR_EXPR: case INDIRECT_REF: case NOP_EXPR: op0 = TREE_OPERAND (op0, 0); goto again; case COND_EXPR: pp_string (buffer, "("); dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, flags, false); pp_string (buffer, ") ? 
"); dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, flags, false);
      pp_string (buffer, " : ");
      dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, flags, false);
      break;

    case ARRAY_REF:
      /* A call through an element of an array: print a plain VAR_DECL base
         as a function name, otherwise dump the whole reference. */
      if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL)
        dump_function_name (buffer, TREE_OPERAND (op0, 0), flags);
      else
        dump_generic_node (buffer, op0, 0, flags, false);
      break;

    case COMPONENT_REF:
    case SSA_NAME:
    case OBJ_TYPE_REF:
      dump_generic_node (buffer, op0, 0, flags, false);
      break;

    default:
      NIY;
    }
}

/* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ...
   Each special character is emitted as its two-character C escape so the
   dumped string literal round-trips as readable source text. */

static void
pretty_print_string (pretty_printer *buffer, const char *str)
{
  if (str == NULL)
    return;

  while (*str)
    {
      switch (str[0])
        {
        case '\b':
          pp_string (buffer, "\\b");
          break;

        case '\f':
          pp_string (buffer, "\\f");
          break;

        case '\n':
          pp_string (buffer, "\\n");
          break;

        case '\r':
          pp_string (buffer, "\\r");
          break;

        case '\t':
          pp_string (buffer, "\\t");
          break;

        case '\v':
          pp_string (buffer, "\\v");
          break;

        case '\\':
          pp_string (buffer, "\\\\");
          break;

        case '\"':
          pp_string (buffer, "\\\"");
          break;

        case '\'':
          pp_string (buffer, "\\'");
          break;

        /* No need to handle \0; the loop terminates on \0. */

        case '\1':
          pp_string (buffer, "\\1");
          break;

        case '\2':
          pp_string (buffer, "\\2");
          break;

        case '\3':
          pp_string (buffer, "\\3");
          break;

        case '\4':
          pp_string (buffer, "\\4");
          break;

        case '\5':
          pp_string (buffer, "\\5");
          break;

        case '\6':
          pp_string (buffer, "\\6");
          break;

        case '\7':
          pp_string (buffer, "\\7");
          break;

        default:
          pp_character (buffer, str[0]);
          break;
        }
      str++;
    }
}

/* Lazily construct the pretty-printer state on first use and direct its
   output to FILE.  NOTE(review): relies on file-scope `initialized` and
   `buffer` objects defined elsewhere in this file -- not visible here. */

static void
maybe_init_pretty_print (FILE *file)
{
  if (!initialized)
    {
      pp_construct (&buffer, /* prefix */NULL, /* line-width */0);
      pp_needs_newline (&buffer) = true;
      pp_translate_identifiers (&buffer) = false;
      initialized = 1;
    }

  buffer.buffer->stream = file;
}

/* Emit a newline followed by SPC columns of indentation. */

static void
newline_and_indent (pretty_printer *buffer, int spc)
{
  pp_newline (buffer);
  INDENT (spc);
}
/* ======== Begin file: hd_joint_probability_generator_inl.h ======== */
/* * * Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the Delft University of Technology. * 4. Neither the name of the Delft University of Technology nor the names of * its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. 
* */ #ifndef HD_JOINT_PROBABILITY_GENERATOR_INL #define HD_JOINT_PROBABILITY_GENERATOR_INL #include "hdi/dimensionality_reduction/hd_joint_probability_generator.h" #include "hdi/utils/math_utils.h" #include "hdi/utils/log_helper_functions.h" #include "hdi/utils/scoped_timers.h" #include <random> #include <chrono> #include <unordered_set> #include <numeric> #ifdef HNSWLIB_FOUND #ifdef _MSC_VER #if (_MSC_VER >= 1910) #include "hnswlib/hnswlib.h" #include "hnswlib/space_l2.h" #define HNSWLIB_SUPPORTED #endif //__cplusplus >=201103 #else // _MSC_VER #include "hnswlib/hnswlib.h" #include "hnswlib/space_l2.h" #define HNSWLIB_SUPPORTED #endif #endif #ifdef __USE_GCD__ #include <dispatch/dispatch.h> #else #define __block #endif #pragma warning( push ) #pragma warning( disable : 4267) #pragma warning( push ) #pragma warning( disable : 4291) #pragma warning( push ) #pragma warning( disable : 4996) #pragma warning( push ) #pragma warning( disable : 4018) #pragma warning( push ) #pragma warning( disable : 4244) #include "flann/flann.h" #pragma warning( pop ) #pragma warning( pop ) #pragma warning( pop ) #pragma warning( pop ) #pragma warning( pop ) namespace hdi{ namespace dr{ ///////////////////////////////////////////////////////////////////////// template <typename scalar, typename sparse_scalar_matrix> HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Parameters::Parameters(): _perplexity(30), _perplexity_multiplier(3), _num_trees(4), _num_checks(1024), _aknn_algorithm(-1), _aknn_algorithmP1(16), // default parameter for HNSW _aknn_algorithmP2(200) // default parameter for HNSW {} ///////////////////////////////////////////////////////////////////////// template <typename scalar, typename sparse_scalar_matrix> HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::Statistics(): _total_time(0), _trees_construction_time(0), _aknn_time(0), _distribution_time(0) {} template <typename scalar, typename sparse_scalar_matrix> void 
HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::reset(){ _total_time = 0; _trees_construction_time = 0; _aknn_time = 0; _distribution_time = 0; } template <typename scalar, typename sparse_scalar_matrix> void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::log(utils::AbstractLog* logger)const{ utils::secureLog(logger,"\n-------- HD Joint Probability Generator Statistics -----------"); utils::secureLogValue(logger,"Total time",_total_time); utils::secureLogValue(logger,"\tTrees construction time",_trees_construction_time,true,1); utils::secureLogValue(logger,"\tAKNN time",_aknn_time,true,3); utils::secureLogValue(logger,"\tDistributions time",_distribution_time,true,2); utils::secureLog(logger,"--------------------------------------------------------------\n"); } ///////////////////////////////////////////////////////////////////////// template <typename scalar, typename sparse_scalar_matrix> HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::HDJointProbabilityGenerator(): _logger(nullptr) { } template <typename scalar, typename sparse_scalar_matrix> void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeJointProbabilityDistribution(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){ utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time); hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution..."); distribution.resize(num_dps); std::vector<scalar_type> distances_squared; std::vector<int> indices; computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, distances_squared, indices, params); computeGaussianDistributions(distances_squared,indices,distribution,params); symmetrize(distribution); } template <typename scalar, typename sparse_scalar_matrix> void HDJointProbabilityGenerator<scalar, 
sparse_scalar_matrix>::computeProbabilityDistributions(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){ utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time); hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution..."); distribution.resize(num_dps); std::vector<scalar_type> distances_squared; std::vector<int> indices; computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, distances_squared, indices, params); computeGaussianDistributions(distances_squared,indices,distribution,params); } template <typename scalar, typename sparse_scalar_matrix> void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeProbabilityDistributions(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, std::vector<scalar_type>& probabilities, std::vector<int>& indices, Parameters params){ utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time); hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution..."); std::vector<scalar_type> distances_squared; computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, distances_squared, indices, params); computeGaussianDistributions(distances_squared,indices,probabilities,params); } template <typename scalar, typename sparse_scalar_matrix> void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeHighDimensionalDistances(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, std::vector<scalar_type>& distances_squared, std::vector<int>& indices, Parameters& params){ #ifdef HNSWLIB_SUPPORTED if (params._aknn_algorithm == -1) #endif { hdi::utils::secureLog(_logger, "Computing nearest neighborhoods..."); flann::Matrix<scalar_type> dataset(high_dimensional_data, num_dps, num_dim); flann::Matrix<scalar_type> query(high_dimensional_data, num_dps, num_dim); 
flann::Index<flann::L2<scalar_type> > index(dataset, flann::KDTreeIndexParams(params._num_trees)); const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1; distances_squared.resize(num_dps*nn); indices.resize(num_dps*nn); { utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._trees_construction_time); index.buildIndex(); } { utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._aknn_time); flann::Matrix<int> indices_mat(indices.data(), query.rows, nn); flann::Matrix<scalar_type> dists_mat(distances_squared.data(), query.rows, nn); flann::SearchParams flann_params(params._num_checks); flann_params.cores = 0; //all cores index.knnSearch(query, indices_mat, dists_mat, nn, flann_params); } } #ifdef HNSWLIB_SUPPORTED else { hdi::utils::secureLog(_logger, "Computing nearest neighborhoods with HNSWLIB..."); hnswlib::L2Space l2space(num_dim); hnswlib::HierarchicalNSW<scalar> appr_alg(&l2space, num_dps, params._aknn_algorithmP1, params._aknn_algorithmP2, 0); { utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._trees_construction_time); appr_alg.addPoint((void*)high_dimensional_data, (std::size_t) 0); #pragma omp parallel for for (int i = 1; i < num_dps; ++i) { appr_alg.addPoint((void*)(high_dimensional_data + (i*num_dim)), (hnswlib::labeltype) i); } } const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1; distances_squared.resize(num_dps*nn); indices.resize(num_dps*nn); { utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._aknn_time); #pragma omp parallel for for (int i = 0; i < num_dps; ++i) { auto top_candidates = appr_alg.searchKnn(high_dimensional_data + (i*num_dim), (hnswlib::labeltype)nn); while (top_candidates.size() > nn) { top_candidates.pop(); } auto *distances_offset = distances_squared.data() + (i*nn); auto indices_offset = indices.data() + (i*nn); int j = 0; while (top_candidates.size() > 0) { auto rez = top_candidates.top(); distances_offset[nn - j - 1] = 
rez.first; indices_offset[nn - j - 1] = appr_alg.getExternalLabel(rez.second); top_candidates.pop(); ++j; } } } } #endif } template <typename scalar, typename sparse_scalar_matrix> void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeGaussianDistributions(const std::vector<scalar_type>& distances_squared, const std::vector<int>& indices, sparse_scalar_matrix& distribution, Parameters& params){ utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time); utils::secureLog(_logger,"Computing joint-probability distribution..."); const int n = distribution.size(); const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1; #ifdef __USE_GCD__ __block scalar_vector_type temp_vector(distances_squared.size(),0); #else scalar_vector_type temp_vector(distances_squared.size(),0); #endif //__USE_GCD__ #ifdef __USE_GCD__ std::cout << "GCD dispatch, hd_joint_probability_generator 193.\n"; dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) { #else #pragma omp parallel for for(int j = 0; j < n; ++j){ #endif //__USE_GCD__ const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>( distances_squared.begin() + j*nn, //check squared distances_squared.begin() + (j + 1)*nn, temp_vector.begin() + j*nn, temp_vector.begin() + (j + 1)*nn, params._perplexity, 200, 1e-5, 0 ); } #ifdef __USE_GCD__ ); #endif for(int j = 0; j < n; ++j){ for(int k = 1; k < nn; ++k){ const unsigned int i = j*nn+k; distribution[j][indices[i]] = temp_vector[i]; } } } template <typename scalar, typename sparse_scalar_matrix> void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeGaussianDistributions(const std::vector<scalar_type>& distances_squared, const std::vector<int>& indices, std::vector<scalar_type>& probabilities, Parameters& params){ utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time); utils::secureLog(_logger,"Computing joint-probability 
distribution...");
  const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1;
  const int n = indices.size()/nn;
  // NOTE(review): `probabilities` is never resized here -- the caller must
  // pre-size it to at least n*nn elements before calling, otherwise the
  // iterator arithmetic below writes out of bounds.  TODO confirm callers.
#ifdef __USE_GCD__
  std::cout << "GCD dispatch, hd_joint_probability_generator 232.\n";
  dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
  for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
    // Per point j: fit a Gaussian matching params._perplexity over its nn
    // neighbour distances; the fitted sigma return value is unused here.
    const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
        distances_squared.begin() + j*nn, //check squared
        distances_squared.begin() + (j + 1)*nn,
        probabilities.begin() + j*nn,
        probabilities.begin() + (j + 1)*nn,
        params._perplexity,
        200,
        1e-5,
        0
      );
  }
#ifdef __USE_GCD__
  );
#endif
}

// Symmetrize the sparse matrix in place: P(j,i) = P(i,j) = (P(j,i)+P(i,j))/2.
// Averaging is idempotent, so revisiting a pair from the mirrored row leaves
// the value unchanged.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::symmetrize(sparse_scalar_matrix& distribution){
  const int n = distribution.size();
  for(int j = 0; j < n; ++j){
    for(auto& e: distribution[j]){
      const unsigned int i = e.first;
      // NOTE(review): distribution[i][j] may insert a new element into row i
      // while row j is being range-iterated; safe only if new insertions
      // never land in the row currently being walked -- TODO confirm the
      // sparse_scalar_matrix row type tolerates this access pattern.
      scalar new_val = (distribution[j][i]+distribution[i][j])*0.5;
      distribution[j][i] = new_val;
      distribution[i][j] = new_val;
    }
  }
}

// Build per-point probability distributions from a precomputed
// num_dps x num_dps squared-distance matrix (dense neighbourhood: nn == n).
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeProbabilityDistributionsFromDistanceMatrix(const std::vector<scalar_type>& squared_distance_matrix, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){
  utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time);
  utils::secureLog(_logger,"Computing joint-probability distribution...");
  const int n = num_dps;
  const unsigned int nn = num_dps;
#ifdef __USE_GCD__
  __block scalar_vector_type temp_vector(num_dps*num_dps,0);
#else
  scalar_vector_type temp_vector(num_dps*num_dps,0);
#endif //__USE_GCD__
  distribution.clear();
  distribution.resize(n);
#ifdef __USE_GCD__
  std::cout << "GCD dispatch, hd_joint_probability_generator 193.\n";
  dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
  for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
    // NOTE(review): the last argument is j here (vs 0 in the kNN overloads)
    // -- presumably the self-distance index to skip; verify against
    // computeGaussianDistributionWithFixedPerplexity.
    const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
        squared_distance_matrix.begin() + j*nn, //check squared
        squared_distance_matrix.begin() + (j + 1)*nn,
        temp_vector.begin() + j*nn,
        temp_vector.begin() + (j + 1)*nn,
        params._perplexity,
        200,
        1e-5,
        j
      );
  }
#ifdef __USE_GCD__
  );
#endif
  // Scatter the dense per-point rows into the output matrix.
  for(int j = 0; j < n; ++j){
    for(int k = 0; k < nn; ++k){
      const unsigned int i = j*nn+k;
      distribution[j][k] = temp_vector[i];
    }
  }
}

///////////////////////////////////////////////////////////////////////////////////7
  }
}
#endif
/* ======== Begin file: Locks.h ======== */
// -*- C++ -*- Copyright (c) Microsoft Corporation; see license.txt #ifndef MESH_PROCESSING_LIBHH_LOCKS_H_ #define MESH_PROCESSING_LIBHH_LOCKS_H_ #include "Hh.h" #if 0 { // critical section parallel_for_each(range(100), [&](const int i) { something(); HH_LOCK { something_synchronized(): } }); // alternate std::mutex g_mutex; parallel_for_each(range(100), [&](const int i) { something(); { std::lock_guard<std::mutex> lg(g_mutex); something_synchronized(); } }); } #endif #include <mutex> // mutex, lock_guard; C++11 namespace hh { //---------------------------------------------------------------------------- // *** No support for locks. #if defined(HH_DEFINE_STD_MUTEX) #define HH_LOCK //---------------------------------------------------------------------------- // *** Use OpenMP for synchronization (its default is a globally defined mutex). #elif 0 #include <omp.h> // OpenMP #define HH_LOCK HH_PRAGMA(omp critical) // A thread waits at the beginning of a critical region until no other thread is executing a critical region // (anywhere in the program) with the same name. All unnamed critical directives map to the same unspecified // name. // Drawbacks of OpenMP critical sections: // (http://www.thinkingparallel.com/2006/08/21/scoped-locking-vs-critical-in-openmp-a-personal-shootout/) // - cannot leave scope using break, continue, goto, or return // - cannot leave scope with exception (e.g. gcc does implicit catch and reports error)! // - cannot use a separate lock guard per-task in a thread-safe function (rare need). //---------------------------------------------------------------------------- // *** All critical sections across the program share the same globally defined mutex -- like OpenMP default. 
#elif 0 class MyGlobalLock { public: MyGlobalLock() : _lock_guard(s_f_global_mutex()) { } private: std::lock_guard<std::mutex> _lock_guard; static std::mutex& s_f_global_mutex() { static auto m = new std::mutex; return *m; } // singleton pattern function }; #define HH_LOCK if (hh::false_capture<hh::MyGlobalLock> HH_UNIQUE_ID(lock){}) { HH_UNREACHABLE; } else //---------------------------------------------------------------------------- // *** The critical sections in each compilation unit (*.cpp file) share the same mutex. #elif 1 namespace { std::mutex s_per_file_mutex; class MyPerFileLock { public: MyPerFileLock() : _lock_guard(s_per_file_mutex) { } private: std::lock_guard<std::mutex> _lock_guard; }; } // namespace #define HH_LOCK if (hh::false_capture<hh::MyPerFileLock> HH_UNIQUE_ID(lock){}) { HH_UNREACHABLE; } else //---------------------------------------------------------------------------- // *** Each critical section has its own mutex. #elif 0 // I can't see a way to implement that using an HH_LOCK { } type macro, // because there is no way to declare/allocate a static variable in the middle of a statement. // Maybe use a false_capture of a templated class with template argument based on __COUNTER__ // (and singleton pattern function)? //---------------------------------------------------------------------------- // *** Old paired macros (all critical sections use a separately defined mutex). #else #error These paired macros are no longer supported. 
#define HH_BEGIN_LOCK { static std::mutex my_mutex1; std::lock_guard<std::mutex> HH_UNIQUE_ID(lock){my_mutex1}; #define HH_END_LOCK } HH_EAT_SEMICOLON #endif } // namespace hh //---------------------------------------------------------------------------- // Good discussion: // // http://stackoverflow.com/questions/23519630/are-there-c11-critical-sections // The C++11 std::mutex does not require cross-processing locking, so a reasonable implementation // should avoid cross-process objects like named semaphores (or win32 Mutex). // // http://stackoverflow.com/questions/800383/what-is-the-difference-between-mutex-and-critical-section/ // // For Windows, critical sections are lighter-weight than mutexes. [here mutex is a win32 Mutex, not std::mutex] // - Mutexes can be shared between processes, but always result in a system call to the kernel which has some overhead. // - Critical sections can only be used within one process, but have the advantage that they only switch to // kernel mode in the case of contention - Uncontended acquires, which should be the common case, are // incredibly fast. In the case of contention, they enter the kernel to wait on some synchronization primitive // (like an event or semaphore). // The following details are specific to critical sections on windows: // - in the absence of contention, acquiring a critical section is as simple as an InterlockedCompareExchange operation // - the critical section structure holds room for a mutex. It is initially unallocated // - if there is contention between threads for a critical section, the mutex will be allocated and used. The // performance of the critical section will degrade to that of the mutex // - if you anticipate high contention, you can allocate the critical section specifying a spin count. // - if there is contention on a critical section with a spin count, the thread attempting to acquire the // critical section will spin (busy-wait) for that many processor cycles. 
This can result in better // performance than sleeping, as the number of cycles to perform a context switch to another thread can be // much higher than the number of cycles taken by the owning thread to release the mutex // - if the spin count expires, the mutex will be allocated // - when the owning thread releases the critical section, it is required to check if the mutex is allocated, // if it is then it will set the mutex to release a waiting thread // http://stackoverflow.com/questions/7798010/openmp-atomic-vs-critical/ // In OpenMP all unnamed critical sections are considered identical (if you prefer, there's only one lock for // all unnamed critical sections). // #pragma omp critical [(name)] // Note that name must be enclosed in parentheses. // Also use the following, which is faster than a critical section: // HH_PRAGMA(omp atomic) g_nslow++; // It works for x+=, x-=, x*=, x&=, etc., and --x and ++x. // Better yet, use std::atomic<T>. // Possibly could use atomic_operate() defined in AtomicOperate.h #endif // MESH_PROCESSING_LIBHH_LOCKS_H_
gbdt.h
#ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/json11.hpp> #include "score_updater.hpp" #include <cstdio> #include <vector> #include <string> #include <fstream> #include <memory> #include <mutex> #include <map> using namespace json11; namespace LightGBM { /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! 
* \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequence of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! 
* \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ virtual const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ virtual int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_preb_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); if (num_iteration > 0) { num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration)); } else { num_preb_in_one_row *= max_iteration; } } else if (is_pred_contrib) { num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_preb_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; /*! * \brief Dump model to json format string * \param num_iteration Number of iterations that want to dump, -1 means dump all * \return Json format string of model */ std::string DumpModel(int num_iteration) const override; /*! 
* \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; void MyModelToIfElse( const std::string& namespaceDisplay, std::ostream& str_buf )const override; /*! * \brief Save model to file * \param num_iterations Number of model that want to save, -1 means save all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ virtual bool SaveModelToFile(int num_iterations, const char* filename) const override; /*! * \brief Save model to string * \param num_iterations Number of model that want to save, -1 means save all * \return Non-empty string if succeeded */ virtual std::string SaveModelToString(int num_iterations) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! * \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! 
* \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! * \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_); } if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ virtual const char* SubModelName() const override { return "tree"; } protected: /*! * \brief Print eval result and check early stopping */ bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! 
   * \brief Implement bagging logic
   * \param iter Current iteration
   */
  virtual void Bagging(int iter);
  /*!
   * \brief Helper function for bagging, used for multi-threading optimization
   * \param start start index of bagging
   * \param cnt count
   * \param buffer output buffer
   * \return count of left size
   */
  data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
  /*!
   * \brief calculate the objective function (fills gradients_/hessians_)
   */
  virtual void Boosting();
  /*!
   * \brief updating score after tree was trained
   * \param tree Trained tree of this iteration
   * \param cur_tree_id Current tree for multiclass training
   */
  virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
  /*!
   * \brief eval results for one metric
   */
  virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
  /*!
   * \brief Print metric result of current iteration
   * \param iter Current iteration
   * \return best_msg if met early_stopping
   */
  std::string OutputMetric(int iter);
  double BoostFromAverage();
  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset* train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction* objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric*> training_metrics_;
  /*! \brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric*>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data*/
  int max_feature_idx_;
  /*! \brief First order derivative of training data */
  std::vector<score_t> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t> hessians_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> tmp_indices_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iterations */
  int num_tree_per_iteration_;
  /*! \brief Number of class */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Per-feature info strings -- presumably bin/range metadata from the Dataset; confirm against Dataset API */
  std::vector<std::string> feature_infos_;
  /*! \brief number of threads */
  int num_threads_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> offsets_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_write_pos_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_write_pos_buf_;
  /*! \brief Subset of the training data used when bagging materializes a copy */
  std::unique_ptr<Dataset> tmp_subset_;
  bool is_use_subset_;
  /*! \brief Per-class flag: whether that class actually needs training */
  std::vector<bool> class_need_train_;
  /*! \brief Per-class constant output used when a class is not trained */
  std::vector<double> class_default_output_;
  bool is_constant_hessian_;
  /*! \brief Objective reconstructed from a loaded model string */
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  bool average_output_;
  bool need_re_bagging_;
  /*! \brief Parameter string preserved from a loaded model */
  std::string loaded_parameter_;
  /*! \brief Forced-split structure parsed from JSON config */
  Json forced_splits_json_;
};

}  // namespace LightGBM
#endif   // LightGBM_BOOSTING_GBDT_H_
residualbased_newton_raphson_contact_strategy.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" // Strategies #include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h" // Utilities #include "utilities/variable_utils.h" #include "utilities/color_utilities.h" #include "utilities/math_utils.h" #include "custom_python/process_factory_utility.h" #include "custom_utilities/contact_utilities.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonContactStrategy * @ingroup ContactStructuralMechanicsApplication * @brief Contact Newton Raphson class * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonContactStrategy : public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; 
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ProcessFactoryUtility::Pointer ProcessesListType; typedef std::size_t IndexType; /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = 
Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; 
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. 
*/ ~ResidualBasedNewtonRaphsonContactStrategy() override = default; //******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************// //***********************************************************************************// /** * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the * values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY // Auxiliar zero array const array_1d<double, 3> zero_array = ZeroVector(3); // Set to zero the weighted gap ModelPart& r_model_part = StrategyBaseType::GetModelPart(); NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes(); const bool frictional = r_model_part.Is(SLIP); // We predict contact pressure in case of contact problem if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) { VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, nodes_array); if (frictional) { VariableUtils().SetVariable(WEIGHTED_SLIP, zero_array, nodes_array); } // Compute the current gap ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact")); // We predict a contact pressure ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); const std::size_t step = r_process_info[STEP]; if (step == 1) { #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT); } } else { #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1)); } } } // BaseType::Predict(); // NOTE: May cause problems in dynamics!!! 
// // // Set to zero the weighted gap // NOTE: This can be done during the search if the predict is deactivated // ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes(); // // // We predict contact pressure in case of contact problem // if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) { // VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, nodes_array); // // // Compute the current gap // ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact")); // // // We predict a contact pressure // ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); // const double initial_penalty_parameter = r_process_info[INITIAL_PENALTY]; // // // We iterate over the nodes // bool is_components = nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) ? false : true; // // #pragma omp parallel for // for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { // auto it_node = nodes_array.begin() + i; // // const double current_gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP); // // const double penalty = it_node->Has(INITIAL_PENALTY) ? 
it_node->GetValue(INITIAL_PENALTY) : initial_penalty_parameter; // // if (current_gap < 0.0) { // it_node->Set(ACTIVE, true); // if (is_components) { // it_node->FastGetSolutionStepValue(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) = penalty * current_gap; // } else { // const array_1d<double, 3>& normal = it_node->FastGetSolutionStepValue(NORMAL); // it_node->FastGetSolutionStepValue(VECTOR_LAGRANGE_MULTIPLIER) = penalty * current_gap * normal; // } // } // } // } KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; BaseType::Initialize(); mFinalizeWasPerformed = false; // Initializing NL_ITERATION_NUMBER ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); r_process_info[NL_ITERATION_NUMBER] = 1; KRATOS_CATCH(""); } /** * @brief The problem of interest is solved. * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(), * SolveSolutionStep() and FinalizeSolutionStep(). * All those functions can otherwise be called separately. */ double Solve() override { this->Initialize(); this->InitializeSolutionStep(); this->Predict(); this->SolveSolutionStep(); this->FinalizeSolutionStep(); // TODO: Add something if necessary return 0.0; } /** * @brief Performs all the required operations that should be done (for each step) * before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. */ void InitializeSolutionStep() override { BaseType::mpConvergenceCriteria->SetEchoLevel(0); BaseType::InitializeSolutionStep(); BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel); mFinalizeWasPerformed = false; } /** * @brief Performs all the required operations that should be done (for each step) * after solving the solution step. 
*/ void FinalizeSolutionStep() override { KRATOS_TRY; if (mFinalizeWasPerformed == false) { BaseType::FinalizeSolutionStep(); // To avoid compute twice the FinalizeSolutionStep mFinalizeWasPerformed = true; } KRATOS_CATCH(""); } /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { KRATOS_TRY; // bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations // bool is_converged = BaseSolveSolutionStep(); // Direct solution bool is_converged = false; // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); if (r_model_part.IsNot(INTERACTION)) { // We get the system TSystemMatrixType& A = *BaseType::mpA; TSystemVectorType& Dx = *BaseType::mpDx; TSystemVectorType& b = *BaseType::mpb; // We get the process info ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); int inner_iteration = 0; while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) { ++inner_iteration; if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << inner_iteration;; } // We solve one loop r_process_info[NL_ITERATION_NUMBER] = 1; r_process_info[INNER_LOOP_ITERATION] = inner_iteration; is_converged = BaseSolveSolutionStep(); // We check the convergence BaseType::mpConvergenceCriteria->SetEchoLevel(0); is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b); BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel); if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. 
INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl; else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl; } } } else { // We compute the base loop r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1; is_converged = BaseSolveSolutionStep(); } if (mThisParameters["adaptative_strategy"].GetBool()) { if (!is_converged) { is_converged = AdaptativeStep(); } } return is_converged; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Parameters mThisParameters; /// The configuration parameters // ADAPTATIVE STRATEGY PARAMETERS bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has been already permformed ProcessesListType mpMyProcesses; /// The processes list ProcessesListType mpPostProcesses; /// The post processes list // OTHER PARAMETERS int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria ///@} ///@name Protected Operators ///@{ /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. 
*/ bool BaseSolveSolutionStep() { KRATOS_TRY; // Pointers needed in the solution ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); typename TSchemeType::Pointer p_scheme = BaseType::GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; // Initializing the parameters of the Newton-Raphson cicle IndexType iteration_number = 1; r_process_info[NL_ITERATION_NUMBER] = iteration_number; bool is_converged = false; bool residual_is_updated = false; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // We do a geometry check before solve the system for first time if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } // Function to perform the building and the solving phase. 
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); //Dx=0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); // We now check the geometry if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); if (is_converged) { // Initialisation of the convergence criteria BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, r_dof_set, rA, rDx, rb); if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } // Iteration Cicle... 
performed only for NonLinearProblems while (is_converged == false && iteration_number++<BaseType::mMaxIterationNumber) { //setting the number of iteration r_process_info[NL_ITERATION_NUMBER] = iteration_number; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); //call the linear system solver to find the correction mDx for the //it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) { if( BaseType::GetKeepSystemConstantDuringIterations() == false) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!! 
" << std::endl; } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); // We now check the geometry if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; if (is_converged) { if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; //std::cout << "mb is calculated" << std::endl; } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } } // Plots a warning if the maximum number of iterations is exceeded if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0) MaxIterationsExceeded(); // Recalculate residual if needed // (note that some convergence criteria need it to be recalculated) if (residual_is_updated == false) { // NOTE: // The following part will be commented because it is time consuming // and there is no obvious reason to be here. If someone need this // part please notify the community via mailing list before uncommenting it. // Pooyan. 
// TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; KRATOS_CATCH(""); } /** * @brief This method performs the adaptative step */ bool AdaptativeStep() { KRATOS_TRY; bool is_converged = false; // Plots a warning if the maximum number of iterations is exceeded if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0) KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl; if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0) KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl; ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later int split_number = 0; // We iterate until we reach the convergence or we split more than desired while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) { // Expliting time step as a way to try improve the convergence split_number += 1; double aux_delta_time, current_time; const double aux_time = SplitTimeStep(aux_delta_time, current_time); current_time += aux_delta_time; bool inside_the_split_is_converged = false; IndexType inner_iteration = 0; while (current_time <= aux_time) { inner_iteration += 1; r_process_info[STEP] += 1; if (inner_iteration == 1) { if (StrategyBaseType::MoveMeshFlag()) UnMoveMesh(); NodesArrayType& nodes_array = r_model_part.Nodes(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = 
nodes_array.begin() + i; it_node->OverwriteSolutionStepData(1, 0); // it_node->OverwriteSolutionStepData(2, 1); } r_process_info.SetCurrentTime(current_time); // Reduces the time step FinalizeSolutionStep(); } else { NodesArrayType& nodes_array = r_model_part.Nodes(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) (nodes_array.begin() + i)->CloneSolutionStepData(); r_process_info.CloneSolutionStepInfo(); r_process_info.ClearHistory(r_model_part.GetBufferSize()); r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step } // We execute the processes before the non-linear iteration if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteInitializeSolutionStep(); if (mpPostProcesses != nullptr) mpPostProcesses->ExecuteInitializeSolutionStep(); // In order to initialize again everything BaseType::mInitializeWasPerformed = false; mFinalizeWasPerformed = false; // We repeat the solve with the new DELTA_TIME this->Initialize(); this->InitializeSolutionStep(); this->Predict(); inside_the_split_is_converged = BaseType::SolveSolutionStep(); this->FinalizeSolutionStep(); // We execute the processes after the non-linear iteration if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteFinalizeSolutionStep(); if (mpPostProcesses != nullptr) mpPostProcesses->ExecuteFinalizeSolutionStep(); if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteBeforeOutputStep(); if (mpPostProcesses != nullptr) mpPostProcesses->PrintOutput(); if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteAfterOutputStep(); current_time += aux_delta_time; } if (inside_the_split_is_converged) is_converged = true; } // Plots a warning if the maximum number of iterations and splits are exceeded if (is_converged == false) MaxIterationsAndSplitsExceeded(); // Restoring original DELTA_TIME r_process_info[DELTA_TIME] = original_delta_time; return is_converged; KRATOS_CATCH(""); } /** * @brief Here the database is updated * @param A The LHS matrix * @param Dx The 
increment of solution after solving system * @param b The RHS vector * @param MoveMesh The flag that tells if the mesh should be moved */ void UpdateDatabase( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, const bool MoveMesh ) override { BaseType::UpdateDatabase(A,Dx,b,MoveMesh); // TODO: Add something if necessary } /** * @brief his method checks if there is no element inverted */ bool CheckGeometryInverted() { ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); bool inverted_element = false; ElementsArrayType& elements_array = r_model_part.Elements(); // NOT OMP for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) { auto it_elem = elements_array.begin() + i; auto& geom = it_elem->GetGeometry(); if (geom.DeterminantOfJacobian(0) < 0.0) { if (mConvergenceCriteriaEchoLevel > 0) { KRATOS_WATCH(it_elem->Id()) KRATOS_WATCH(geom.DeterminantOfJacobian(0)) } return true; } // We check now the deformation gradient std::vector<Matrix> deformation_gradient_matrices; it_elem->CalculateOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info); for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) { const double det_f = MathUtils<double>::DetMat(deformation_gradient_matrices[i_gp]); if (det_f < 0.0) { if (mConvergenceCriteriaEchoLevel > 0) { KRATOS_WATCH(it_elem->Id()) KRATOS_WATCH(det_f) } return true; } } } return inverted_element; } /** * @brief Here the time step is splitted * @param AuxDeltaTime The new delta time to be considered * @param CurrentTime The current time * @return The destination time */ double SplitTimeStep( double& AuxDeltaTime, double& CurrentTime ) { KRATOS_TRY; const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME]; AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME]; CurrentTime = aux_time - AuxDeltaTime; 
StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one AuxDeltaTime /= mThisParameters["split_factor"].GetDouble(); StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time CoutSplittingTime(AuxDeltaTime, aux_time); return aux_time; KRATOS_CATCH(""); } /** * This method moves bak the mesh to the previous position */ void UnMoveMesh() { KRATOS_TRY; if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false) KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl; NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates(); noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1); } KRATOS_CATCH(""); } /** * @brief This method returns the defaulr parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() { Parameters default_parameters = Parameters(R"( { "adaptative_strategy" : false, "split_factor" : 10.0, "max_number_splits" : 3, "inner_loop_iterations" : 5 })" ); return default_parameters; } /** * @brief This method prints information after solving the problem */ void CoutSolvingProblem() { if (mConvergenceCriteriaEchoLevel != 0) { std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl; } } 
/** * @brief This method prints information after split the increment of time * @param AuxDeltaTime The new time step to be considered * @param AuxTime The destination time */ void CoutSplittingTime( const double AuxDeltaTime, const double AuxTime ) { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { const double Time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME]; std::cout.precision(4); std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl; std::cout << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << Time << " |" << std::endl; std::cout << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl; std::cout << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } /** * @brief This method prints information after reach the max number of interations */ void MaxIterationsExceeded() override { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } /** * @brief This method prints information after reach the max number of interations and splits */ void MaxIterationsAndSplitsExceeded() { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl; std::cout << "| " << BOLDFONT(FRED(" Max 
number of splits exceeded ")) << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@{ /** * Copy constructor. */ ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedNewtonRaphsonContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
veccopy.c
/*
 * veccopy: copies vector b into vector a inside an OpenMP
 * "target parallel for" region, then verifies the copy on the host.
 *
 * Returns the number of mismatched elements (0 on success).
 */
#include <stdio.h>
#include <omp.h>

int main() {
    int N = 10;
    int a[N];
    int b[N];
    int i;

    for (i = 0; i < N; i++)
        a[i] = 0;

    for (i = 0; i < N; i++)
        b[i] = i;

    /* NOTE: a combined loop construct ("target parallel for") must bind
     * directly to the for statement that follows it.  The original code
     * wrapped the loop in a compound block `{ ... }`, which is invalid
     * OpenMP and rejected by conforming compilers. */
#pragma omp target parallel for
    for (int j = 0; j < N; j++)
        a[j] = b[j];

    /* Host-side verification: count mismatches. */
    int rc = 0;
    for (i = 0; i < N; i++)
        if (a[i] != b[i]) {
            rc++;
            printf("Wrong value: a[%d]=%d\n", i, a[i]); /* fixed typo "varlue" */
        }

    if (!rc)
        printf("Success\n");
    return rc;
}
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
EW.c
// SW4 LICENSE // # ---------------------------------------------------------------------- // # SW4 - Seismic Waves, 4th order // # ---------------------------------------------------------------------- // # Copyright (c) 2013, Lawrence Livermore National Security, LLC. // # Produced at the Lawrence Livermore National Laboratory. // # // # Written by: // # N. Anders Petersson (petersson1@llnl.gov) // # Bjorn Sjogreen (sjogreen2@llnl.gov) // # // # LLNL-CODE-643337 // # // # All rights reserved. // # // # This file is part of SW4, Version: 1.0 // # // # Please also read LICENCE.txt, which contains "Our Notice and GNU General Public License" // # // # This program is free software; you can redistribute it and/or modify // # it under the terms of the GNU General Public License (as published by // # the Free Software Foundation) version 2, dated June 1991. // # // # This program is distributed in the hope that it will be useful, but // # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF // # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and // # conditions of the GNU General Public License for more details. 
// # // # You should have received a copy of the GNU General Public License // # along with this program; if not, write to the Free Software // # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA #include "sw4.h" #include "EW.h" #include <sstream> #include <fstream> #ifdef SW4_OPENMP #include <omp.h> #endif #include <mpi.h> #include <cstring> #include <cstdlib> #include <cstdio> #include <unistd.h> #include <errno.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <algorithm> #include <cmath> #include "Source.h" #include "GridPointSource.h" #include "CheckPoint.h" #include "MaterialBlock.h" #include "TimeSeries.h" #include "F77_FUNC.h" #include "EWCuda.h" extern "C" { void F77_FUNC(dspev,DSPEV)(char & JOBZ, char & UPLO, int & N, double *AP, double *W, double *Z, int & LDZ, double *WORK, int & INFO); } void rhs4sg_rev( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast, int nk, int* onesided, float_sw4* a_acof, float_sw4* a_bope, float_sw4* a_ghcof, float_sw4* a_lu, float_sw4* a_u, float_sw4* a_mu, float_sw4* a_lambda, float_sw4 h, float_sw4* a_strx, float_sw4* a_stry, float_sw4* a_strz ); void rhs4sg( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast, int nk, int* onesided, float_sw4* a_acof, float_sw4* a_bope, float_sw4* a_ghcof, float_sw4* a_lu, float_sw4* a_u, float_sw4* a_mu, float_sw4* a_lambda, float_sw4 h, float_sw4* a_strx, float_sw4* a_stry, float_sw4* a_strz ); void rhs4sgcurv_rev( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast, float_sw4* a_u, float_sw4* a_mu, float_sw4* a_lambda, float_sw4* a_met, float_sw4* a_jac, float_sw4* a_lu, int* onesided, float_sw4* acof, float_sw4* bope, float_sw4* ghcof, float_sw4* a_strx, float_sw4* a_stry ); void rhs4sgcurv( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast, float_sw4* a_u, float_sw4* a_mu, float_sw4* a_lambda, float_sw4* a_met, float_sw4* a_jac, float_sw4* a_lu, int* onesided, float_sw4* 
// (tail of the rhs4sgcurv prototype begun on the previous source line)
acof, float_sw4* bope, float_sw4* ghcof, float_sw4* a_strx, float_sw4* a_stry );

//-----------------------------------------------------------------------
// EW constructor: initializes all simulation parameters to their
// defaults, selects the MPI datatype matching the compile-time size of
// float_sw4, queries MPI rank/size, then parses the input file, sets up
// the run, and immediately executes the time-stepping loop.
EW::EW( const string& filename ) :
   mCFL(1.3),
   mTstart(0.0),
   mTmax(0.0),
   mTimeIsSet(false),
   mNumberOfTimeSteps(-1),
   mPrintInterval(100),
   m_ghost_points(2),
   m_ext_ghost_points(2),
   m_ppadding(2),
   mVerbose(0),
   mQuiet(false),
   m_supergrid_damping_coefficient(0.02),
   m_sg_damping_order(4),
   m_sg_gp_thickness(20),
   m_use_supergrid(false),
   m_checkfornan(false),
   m_topography_exists(false),
   m_grid_interpolation_order(4),
   m_zetaBreak(0.95),
   m_point_source_test(false),
   mPath("./"),
   m_moment_test(false),
   m_pfs(false),
   m_nwriters(8),
   m_output_detailed_timing(false),
   m_save_trace(false),
   m_ndevice(0),
   m_corder(false),
   mGeoAz(0.0),
   mLonOrigin(-118.0),
   mLatOrigin(37.0),
   mMetersPerDegree(111319.5),
   mMetersPerLongitude(87721.0),
   mConstMetersPerLongitude(false)
{
   // Default GPU thread-block dimensions (overridable by the
   // "developer thblocki/j/k=" options).
   m_gpu_blocksize[0] = 16;
   m_gpu_blocksize[1] = 16;
   m_gpu_blocksize[2] = 1;
   // Pick the MPI datatype that matches float_sw4's size.
   if( sizeof(float_sw4) == 4 )
      m_mpifloat = MPI_FLOAT;
   else if( sizeof(float_sw4) == 8 )
      m_mpifloat = MPI_DOUBLE;
   else
      CHECK_INPUT(false,"Error, could not identify float_sw4");
   MPI_Comm_rank( MPI_COMM_WORLD, &m_myrank );
   MPI_Comm_size( MPI_COMM_WORLD, &m_nprocs );
   m_restart_check_point = CheckPoint::nil;
   parseInputFile( filename );
   setupRun( );
   timesteploop( mU, mUm );
}

//-----------------------------------------------------------------------
// Returns the number of grid points needed to span [0,maxval] with
// spacing h.  If (fnpts-1)*h reaches maxval to within a relative plus
// absolute tolerance the rounded count is used, otherwise one extra
// point is added.
int EW::computeEndGridPoint( float_sw4 maxval, float_sw4 h )
{
   const float_sw4 reltol = 1e-5;
   const float_sw4 abstol = 1e-12;
   float_sw4 fnpts = round(maxval/h+1);
   int npts;
   if( fabs((fnpts-1)*h-maxval) < reltol*fabs(maxval)+abstol )
      npts = static_cast<int>(fnpts);
   else
      npts = static_cast<int>(fnpts)+1;
   return npts;
}

//-----------------------------------------------------------------------
// Returns true if 'line' begins with the string 'begin', after skipping
// leading blanks and tabs.  (Body completes on the next source line.)
bool EW::startswith(const char begin[], char *line)
{
   int lenb = strlen(begin);

   // We ignore any preceding whitespace
   while (strncmp(line, " ", 1) == 0 || strncmp(line, "\t", 1) == 0)
      line++;

   if (strncmp(begin, line, lenb) == 0)
      return true;
   else
      return
false;   // completes the 'return' begun on the previous source line
}

//-----------------------------------------------------------------------
// Prints a warning (rank 0 only) about an unrecognized option on an
// input-file command line.
void EW::badOption(string name, char* option) const
{
   if (m_myrank == 0)
      cout << "\tWarning: ignoring " << name << " line option '" << option << "'" << endl;
}

//-----------------------------------------------------------------------
// Parses the "grid" input command.  Exactly one of the five legal
// h/nx/ny/nz/x/y/z combinations must be given; the remaining dimensions
// and extents are derived and stored in m_nx_base etc.
void EW::processGrid( char* buffer )
{
   float_sw4 x = 0.0, y=0.0, z=0.0, h=0.0;
   int nx=0, ny=0, nz=0;

   stringstream gridSetupErrStream;
   gridSetupErrStream << endl
      << "----------------------------------------" << endl
      << " Only five ways to setup grid: " << endl
      << " 1. provide h and nx, ny, nz " << endl
      << " 2. provide h and x, y, z " << endl
      << " 3. provide x,y,z and nx " << endl
      << " 4. provide x,y,z and ny " << endl
      << " 5. provide x,y,z and nz " << endl
      << "----------------------------------------" << endl << endl;
   string gridSetupErr = gridSetupErrStream.str();

   char* token = strtok(buffer, " \t");
   token = strtok(NULL, " \t");
   string err = "ERROR in ProcessGrid: ";

   if( m_myrank == 0 )
      cout << endl << "* Processing the grid command..." << endl;
   while (token != NULL) // while there are tokens in the string still
   {
      if (startswith("#", token) || startswith(" ", buffer))
         // Ignore commented lines and lines with just a space.
         break;
      if (startswith("ny=", token))
      {
         token += 3;
         CHECK_INPUT(atoi(token) > 0, err << "ny is not a positive integer: " << token);
         ny = atoi(token);
      }
      else if (startswith("nx=", token))
      {
         token += 3;
         CHECK_INPUT(atoi(token) > 0, err << "nx is not a positive integer: " << token);
         nx = atoi(token);
      }
      else if (startswith("nz=", token))
      {
         token += 3;
         CHECK_INPUT(atoi(token) >= 0, err << "nz is not a positive integer: " << token);
         nz = atoi(token);
      }
      else if (startswith("x=", token))
      {
         token += 2;
         CHECK_INPUT(atof(token) > 0.0, err << "x is not a positive float: " << token);
         x = atof(token);
      }
      else if (startswith("y=", token))
      {
         token += 2;
         CHECK_INPUT(atof(token) >= 0.0, err << "y is negative: " << token);
         y = atof(token);
      }
      else if (startswith("z=", token))
      {
         token += 2;
         CHECK_INPUT(atof(token) > 0.0, err << "z is not a positive float: " << token);
         z = atof(token);
      }
      else if (startswith("h=", token))
      {
         token += 2;
         CHECK_INPUT(atof(token) > 0.0, err << "h is not a positive float: " << token);
         h = atof(token);
      }
      else
      {
         badOption("grid", token);
      }
      token = strtok(NULL, " \t");
   }

   //--------------------------------------------------------------------
   // There are only three ways to specify a grid.
   //--------------------------------------------------------------------
   if (h != 0.0)
   {
      if (nx > 0 || nz > 0 || ny > 0)
      {
         //----------------------------------------------------------------
         // 1. nx, [ny], nz and h
         //----------------------------------------------------------------
         CHECK_INPUT(nx && nz, gridSetupErr);
         CHECK_INPUT(x == 0.0 && y == 0.0 && z == 0.0, gridSetupErr);
      }
      else
      {
         //--------------------------------------------------------------
         // 2. x, [y], z and h
         //--------------------------------------------------------------
         CHECK_INPUT(x > 0.0 && z > 0.0, gridSetupErr);
         CHECK_INPUT(nx == 0 && ny == 0 && nz == 0, gridSetupErr);
      }
   }
   else
   {
      //--------------------------------------------------------------------
      // 3. x, [y], z and nx|ny|nz
      //--------------------------------------------------------------------
      CHECK_INPUT(x > 0.0 && z > 0.0, gridSetupErr);
      CHECK_INPUT((nx > 0) + (ny > 0) + (nz > 0) == 1, gridSetupErr);
   }

   int nxprime, nyprime, nzprime;
   float_sw4 xprime, yprime, zprime;

   if (nx > 0 && h == 0.0)
   {
      // we set the number of grid points in the x direction
      // so we'll compute the grid spacing from that.
      h = x / (nx-1);
      if (m_myrank == 0)
         cout << "Setting h to " << h << " from x/(nx-1) (x=" << x << ", nx=" << nx << ")" << endl;
      nxprime = nx;
      nzprime = computeEndGridPoint(z, h);
      nyprime = computeEndGridPoint(y, h);
   }
   else if (ny > 0 && h == 0.0)
   {
      // set the number of grid points from y direction and ny
      h = y/(ny-1);
      if (m_myrank == 0)
         cout << "Setting h to " << h << " from y/(ny-1) (y=" << y << ", ny=" << ny << ")" << endl;
      nyprime = ny;
      nxprime = computeEndGridPoint(x, h);
      nzprime = computeEndGridPoint(z, h);
   }
   else if (nz > 0 && h == 0.0)
   {
      // set the number of grid points from z direction and nz
      h = z/(nz-1);
      if (m_myrank == 0)
         cout << "Setting h to " << h << " from z/(nz-1) (z=" << z << ", nz=" << nz << ")" << endl;
      nzprime = nz;
      nxprime = computeEndGridPoint(x, h);
      nyprime = computeEndGridPoint(y, h);
   }
   else
   {
      //----------------------------------------------------
      // h was set by the user, so compute the appropriate
      // nx, ny, and nz or x, y, z.
      //----------------------------------------------------
      if (nx == 0 && x != 0.0)
         nxprime = computeEndGridPoint(x, h);
      else if (nx != 0)
         nxprime = nx;
      else
         CHECK_INPUT(0, gridSetupErr);
      if (nz == 0 && z != 0.0)
         nzprime = computeEndGridPoint(z, h);
      else if (nz != 0)
         nzprime = nz;
      else
         CHECK_INPUT(0, gridSetupErr);
      if (ny == 0 && y != 0.0)
         nyprime = computeEndGridPoint(y, h);
      else if (ny != 0)
         nyprime = ny;
      else
         CHECK_INPUT(0, gridSetupErr);
   }

   if (m_myrank == 0 && mVerbose >=3)
      printf("Setting up the grid for a non-periodic problem\n");

   if (nxprime != nx && m_myrank == 0)
      cout << "Setting nx to " << nxprime << " to be consistent with h=" << h << endl;
   if (nyprime != ny && m_myrank == 0)
      cout << "Setting ny to " << nyprime << " to be consistent with h=" << h << endl;
   if (nzprime != nz && m_myrank == 0)
      cout << "Setting nz to " << nzprime << " to be consistent with h=" << h << endl;

   // -------------------------------------------------------------
   // Now we adjust the geometry bounds based on the actual
   // number of grid points used in each dimension.
   // -------------------------------------------------------------
   xprime = (nxprime-1)*h;
   zprime = (nzprime-1)*h;
   yprime = (nyprime-1)*h;

   float_sw4 eps = 1.e-9*sqrt(xprime*xprime+yprime*yprime+zprime*zprime);
   if (fabs(xprime-x) > eps && m_myrank == 0)
      cout << "Changing x from " << x << " to " << xprime << " to be consistent with h=" << h << endl;
   if (fabs(zprime-z) > eps && m_myrank == 0)
      cout << "Changing z from " << z << " to " << zprime << " to be consistent with h=" << h << endl;
   if (fabs(yprime-y) > eps && m_myrank == 0)
      cout << "Changing y from " << y << " to " << yprime << " to be consistent with h=" << h << endl;

   // Store the base-grid dimensions and global extents.
   m_nx_base = nxprime;
   m_ny_base = nyprime;
   m_nz_base = nzprime;
   m_h_base = h;
   m_global_xmax = xprime;
   m_global_ymax = yprime;
   m_global_zmax = zprime;
   m_global_zmin = 0;
}

//-----------------------------------------------------------------------
// Parses the "time" command: either a final time t or a number of time
// steps, and records the current UTC date.  (Body completes on the next
// source line.)
void EW::processTime(char* buffer)
{
   float_sw4 t=0.0;
   int steps = -1;
   char* token = strtok(buffer, " \t");
   token = strtok(NULL, " \t");
   string err = "ERROR in processTime: ";
   while (token != NULL) // while there are still tokens in the string
   {
      if (startswith("#", token) || startswith(" ", buffer))
         // Ignore commented lines and lines with just a space.
// (body of the skip-comment test begun on the previous source line)
         break;
      if (startswith("t=", token))
      {
         token += 2; // skip t=
         CHECK_INPUT(atof(token) >= 0.0,
                     err << "t is not a positive float: " << token);
         t = atof(token);
      }
      else if (startswith("steps=", token))
      {
         token += 6; // skip steps=
         CHECK_INPUT(atoi(token) >= 0,
                     err << "steps is not a non-negative integer: " << token);
         steps = atoi(token);
      }
      else
      {
         badOption("time", token);
      }
      token = strtok(NULL, " \t");
   }
   // t and steps are mutually exclusive ways to set the run length.
   CHECK_INPUT(!( (t > 0.0) && (steps >= 0) ),
               "Time Error: Cannot set both t and steps for time");
   if (t > 0.0)
   {
      mTmax = t;
      mTstart = 0;
      mTimeIsSet = true;
   }
   else if (steps >= 0)
   {
      mTstart = 0;
      mNumberOfTimeSteps = steps;
      mTimeIsSet = false;
   }

   // Set UTC as current date
   time_t tsec;
   time( &tsec );
   struct tm *utctime = gmtime( &tsec );
   m_utc0[0] = utctime->tm_year+1900;
   m_utc0[1] = utctime->tm_mon+1;
   m_utc0[2] = utctime->tm_mday;
   m_utc0[3] = utctime->tm_hour;
   m_utc0[4] = utctime->tm_min;
   m_utc0[5] = utctime->tm_sec;
   m_utc0[6] = 0; //milliseconds not given by 'time', not needed here.
}

//-----------------------------------------------------------------------
// Parses the "topography" command: topography input style (currently
// only the Gaussian hill), interpolation order, and related options.
// (Body completes on the next source line.)
void EW::processTopography(char * buffer )
{
   //
   // Note, m_topoFileName, m_topoExtFileName, m_maxIter, m_EFileResolution, m_QueryTyp could
   // have been declared local variables in EW::parseInputFile, and transferred as
   // procedure parameters to smoothTopography and getEfileInfo
   //
   char* token = strtok(buffer, " \t");
   CHECK_INPUT(strcmp("topography", token) == 0,
               "ERROR: not a topography line...: " << token);
   string topoFile="surf.tp", style, fileName;
   bool needFileName=false, gotFileName=false;
   m_zetaBreak=0.95;
   m_grid_interpolation_order = 4;
   m_use_analytical_metric = false;

   token = strtok(NULL, " \t");
   while (token != NULL) // while there are still tokens in the string
   {
      if (startswith("#", token) || startswith(" ", buffer))
         // Ignore commented lines and lines with just a space.
// (body of the skip-comment test begun on the previous source line)
         break;
      if (startswith("zmax=", token))
      {
         token += 5; // skip zmax=
         m_topo_zmax = atof(token);
      }
      //
      // 1234567890
      else if (startswith("order=", token))
      {
         token += 6; // skip order=
         m_grid_interpolation_order = atoi(token);
         if (m_grid_interpolation_order < 2 || m_grid_interpolation_order > 7)
         {
            if (m_myrank == 0)
               cout << "order needs to be 2,3,4,5,6,or 7 not: " << m_grid_interpolation_order << endl;
            MPI_Abort(MPI_COMM_WORLD, 1);
         }
      }
      // 123456789
      else if( startswith("zetabreak=", token) ) // developer option: not documented in user's guide
      {
         token += 10;
         m_zetaBreak = atof(token);
         CHECK_INPUT( m_zetaBreak > 0 && m_zetaBreak <= 1,
                      "Error: zetabreak must be in [0,1], not " << m_zetaBreak);
      }
      else if( startswith("input=", token ) )
      {
         token += 6;
         style = token;
         if( strcmp("gaussian", token) == 0)
         // else if (strcmp("gaussian", token) == 0)
         {
            m_topoInputStyle=GaussianHill;
            m_topography_exists=true;
         }
         else
         {
            badOption("topography> input", token);
         }
      }
      else if( startswith("file=", token ) )
      {
         token += 5;
         m_topoFileName = token;
         gotFileName=true;
      }
      else if( startswith("gaussianAmp=", token ) )
      {
         token += 12;
         m_GaussianAmp = atof(token);
      }
      else if( startswith("gaussianXc=", token ) )
      {
         token += 11;
         m_GaussianXc = atof(token);
      }
      else if( startswith("gaussianYc=", token ) )
      {
         token += 11;
         m_GaussianYc = atof(token);
      }
      else if( startswith("gaussianLx=", token ) )
      {
         token += 11;
         m_GaussianLx = atof(token);
      }
      else if( startswith("gaussianLy=", token ) )
      {
         token += 11;
         m_GaussianLy = atof(token);
      }
      else if( startswith("analyticalMetric=", token ) )
      {
         token += 17;
         m_use_analytical_metric = strcmp(token,"1")==0 || strcmp(token,"true")==0 || strcmp(token,"yes")==0;
      }
      else
      {
         badOption("topography", token);
      }
      token = strtok(NULL, " \t");
   }
   if (needFileName)
      CHECK_INPUT(gotFileName,
                  "ERROR: no topography file name specified...: " << token);
   CHECK_INPUT(m_topoInputStyle == GaussianHill,
               "Topography style " << m_topoInputStyle << " not yet implemented " << endl);
   // The analytical metric is only available for the Gaussian hill.
   if( m_topoInputStyle != GaussianHill && m_use_analytical_metric )
   {
      m_use_analytical_metric = false;
      if( m_myrank == 0 )
         cout << "Analytical metric only defined for Gaussian Hill topography" <<
            " topography analyticalMetric option will be ignored " << endl;
   }
}

//-----------------------------------------------------------------------
// Parses the "fileio" command: output path, verbosity, print interval,
// parallel-file-system flag and number of parallel writers.
void EW::processFileIO(char* buffer)
{
   char* token = strtok(buffer, " \t");
   CHECK_INPUT(strcmp("fileio", token) == 0,
               "ERROR: not a fileio line...: " << token);
   token = strtok(NULL, " \t");
   string err = "FileIO Error: ";

   while (token != NULL)
   {
      if (startswith("#", token) || startswith(" ", buffer))
         break;
      if(startswith("path=", token))
      {
         token += 5; // skip path=
         mPath = token;
         mPath += '/';
         // path = token;
      }
      else if (startswith("verbose=", token))
      {
         token += 8; // skip verbose=
         CHECK_INPUT(atoi(token) >= 0,
                     err << "verbose must be non-negative, not: " << token);
         mVerbose = atoi(token);
      }
      else if (startswith("printcycle=", token))
      {
         token += 11; // skip printcycle=
         CHECK_INPUT(atoi(token) > -1,
                     err << "printcycle must be zero or greater, not: " << token);
         mPrintInterval = atoi(token);
      }
      else if (startswith("pfs=", token))
      {
         token += 4; // skip pfs=
         m_pfs = (atoi(token) == 1);
      }
      else if (startswith("nwriters=", token))
      {
         token += 9; // skip nwriters=
         CHECK_INPUT(atoi(token) > 0,
                     err << "nwriters must be positive, not: " << token);
         m_nwriters = atoi(token);
      }
      else
      {
         badOption("fileio", token);
      }
      token = strtok(NULL, " \t");
   }
}

//-----------------------------------------------------------------------
// Parses the "checkpoint" command; at least one timing mechanism
// (time, timeInterval, cycle or cycleInterval) must be supplied.
// (Body completes on the next source line.)
void EW::processCheckPoint(char* buffer)
{
   char* token = strtok(buffer, " \t");
   CHECK_INPUT(strcmp("checkpoint", token) == 0,
               "ERROR: not a checkpoint line...: " << token);
   token = strtok(NULL, " \t");
   string err = "CheckPoint Error: ";
   int cycle=-1, cycleInterval=0;
   float_sw4 time=0.0, timeInterval=0.0;
   bool timingSet=false;
   string filePrefix = "restart";
   size_t bufsize=10000000;
   while (token != NULL)
   {
      if (startswith("#", token) || startswith(" ", buffer))
// (body of the skip-comment test begun on the previous source line)
         break;
      if (startswith("time=", token) )
      {
         token += 5; // skip time=
         CHECK_INPUT( atof(token) >= 0.,
                      err << "time must be a non-negative number, not: " << token);
         time = atof(token);
         timingSet = true;
      }
      else if (startswith("timeInterval=", token) )
      {
         token += 13; // skip timeInterval=
         CHECK_INPUT( atof(token) >= 0.,
                      err<< "timeInterval must be a non-negative number, not: " << token);
         timeInterval = atof(token);
         timingSet = true;
      }
      else if (startswith("cycle=", token) )
      {
         token += 6; // skip cycle=
         CHECK_INPUT( atoi(token) >= 0.,
                      err << "cycle must be a non-negative integer, not: " << token);
         cycle = atoi(token);
         timingSet = true;
      }
      else if (startswith("cycleInterval=", token) )
      {
         token += 14; // skip cycleInterval=
         CHECK_INPUT( atoi(token) >= 0.,
                      err << "cycleInterval must be a non-negative integer, not: " << token);
         cycleInterval = atoi(token);
         timingSet = true;
      }
      else if (startswith("file=", token))
      {
         token += 5; // skip file=
         filePrefix = token;
      }
      else if (startswith("bufsize=", token))
      {
         token += 8; // skip bufsize=
         bufsize = atoi(token);
      }
      else
      {
         badOption("checkpoint", token);
      }
      token = strtok(NULL, " \t");
   }
   CHECK_INPUT( timingSet, "Processing checkpoint command: " <<
                "at least one timing mechanism must be set: cycle, time, cycleInterval or timeInterval" << endl );
   CheckPoint* chkpt = new CheckPoint( this, time, timeInterval, cycle,
                                       cycleInterval, filePrefix, bufsize );
   m_check_points.push_back(chkpt);
}

//-----------------------------------------------------------------------
// Parses the "restart" command; exactly one restart file may be given.
void EW::processRestart(char* buffer)
{
   char* token = strtok(buffer, " \t");
   CHECK_INPUT(strcmp("restart", token) == 0,
               "ERROR: not a restart line...: " << token);
   token = strtok(NULL, " \t");
   string fileName;
   bool filenamegiven = false;
   size_t bufsize=10000000;
   while (token != NULL)
   {
      if (startswith("#", token) || startswith(" ", buffer))
         break;
      if (startswith("file=", token) )
      {
         token += 5; // skip file=
         fileName = token;
         filenamegiven = true;
      }
      else if (startswith("bufsize=", token))
      {
         token += 8; // skip bufsize=
         bufsize = atoi(token);
      }
      else
      {
         badOption("restart", token);
      }
      token = strtok(NULL, " \t");
   }
   CHECK_INPUT( filenamegiven, "Processing restart command: " <<
                "restart file name must be given" << endl );
   CHECK_INPUT( m_restart_check_point == CheckPoint::nil, "Processing restart command: "<<
                " There can only be one restart file");
   m_restart_check_point = new CheckPoint( this, fileName, bufsize );
}

//-----------------------------------------------------------------------
// Parses the "testpointsource" command: sets a homogeneous material
// (rho, cs, cp) on all grids and supergrid boundary conditions, with an
// optional free surface on the top side.
void EW::processTestPointSource(char* buffer)
{
   char* token = strtok(buffer, " \t");
   token = strtok(NULL, " \t");
   float_sw4 cs = 1.0, rho=1.0, cp=sqrt(3.0);
   bool free_surface=false;
   while (token != NULL)
   {
      if (startswith("#", token) || startswith(" ", buffer))
         break;
      if (startswith("cp=", token))
      {
         token += 3;
         cp = atof(token);
      }
      else if (startswith("cs=", token))
      {
         token += 3;
         cs = atof(token);
      }
      else if (startswith("rho=", token))
      {
         token += 4;
         rho = atof(token);
      }
      else if (startswith("diractest=", token))
      {
         token += 10;
         if( strcmp(token,"1")==0 || strcmp(token,"true")==0 )
            m_moment_test = true;
      }
      else if (startswith("halfspace=", token))
      {
         token += 10;
         free_surface = ( strcmp(token,"1")==0 || strcmp(token,"true")==0 );
      }
      else
      {
         badOption("testpointsource", token);
      }
      token = strtok(NULL, " \t");
   }
   m_point_source_test = true;
   // Lame parameters derived from the wave speeds and density.
   float_sw4 mu = rho*cs*cs;
   float_sw4 la = rho*cp*cp-2*mu;
   for( int g=0 ; g < mNumberOfGrids ; g++ )
   {
      mRho[g].set_value(rho);
      mMu[g].set_value(mu);
      mLambda[g].set_value(la);
   }
   for( int side=0 ; side < 6 ; side++ )
      mbcGlobalType[side]=bSuperGrid;
   if( free_surface )
      mbcGlobalType[4]=bStressFree;
}

//----------------------------------------------------------------------------
// Parses the "source" command: location (z or depth), either a moment
// tensor (Mxx..Mzz) or a point force (Fx,Fy,Fz), and the source time
// function.  (Body completes on the following source lines.)
void EW::processSource( char* buffer )
{
   Source* sourcePtr;
   float_sw4 m0 = 1.0;
   float_sw4 t0=0.0, f0=1.0, freq=1.0;

   // Should be center of the grid
   float_sw4 x = 0.0, y = 0.0, z = 0.0;
   // int i = 0, j = 0, k = 0;
   float_sw4 mxx=0.0, mxy=0.0, mxz=0.0, myy=0.0, myz=0.0, mzz=0.0; //
// (continues the local declarations of EW::processSource)
   float_sw4 strike=0.0, dip=0.0, rake=0.0;
   float_sw4 fx=0.0, fy=0.0, fz=0.0;
   int isMomentType = -1;  // -1: unset, 1: moment tensor, 0: point force
   // float_sw4 lat = 0.0, lon = 0.0, depth = 0.0;
   float_sw4 depth= 0.0;
   bool topodepth = false, depthSet=false, zSet=false;
   bool cartCoordSet = false;
   float_sw4* par=NULL;
   int* ipar=NULL;
   int npar=0, nipar=0;
   int ncyc = 5;
   timeDep tDep = iRickerInt;
   char formstring[100];
   // char dfile[1000];
   strcpy(formstring, "Ricker");
   char* token = strtok(buffer, " \t");
   token = strtok(NULL, " \t");
   string err = "ERROR in ProcessSource: ";
   // string cartAndGeoErr = "source command: Cannot set both a geographical (lat,lon) and cartesian coordinate (x,y)";
   string pointAndMomentErr = "source command: Cannot set both a point source and moment tensor formulation";
   while (token != NULL) // while there are tokens in the string still
   {
      if (startswith("#", token) || startswith(" ", buffer))
         // Ignore commented lines and lines with just a space.
         break;
      if (startswith("m0=", token) )
      {
         token += 3; // skip m0=
         CHECK_INPUT(atof(token) >= 0.0,
                     err << "source command: scalar moment term must be positive, not: " << token);
         m0 = atof(token);
      }
      else if (startswith("x=", token))
      {
         token += 2; // skip x=
         x = atof(token);
         cartCoordSet = true;
      }
      else if (startswith("y=", token))
      {
         token += 2; // skip y=
         y = atof(token);
         cartCoordSet = true;
      }
      else if (startswith("z=", token))
      {
         token += 2; // skip z=
         // with topography, the z-coordinate can have both signs!
         z = atof(token);
         topodepth=false; // this is absolute depth
         zSet = true;
      }
      else if (startswith("depth=", token)) // this is the same as topodepth: different from WPP
      {
         token += 6; // skip depth=
         depth = atof(token);
         topodepth = true;
         CHECK_INPUT(depth >= 0.0,
                     err << "source command: Depth below topography must be greater than or equal to zero");
         depthSet=true;
      }
      else if (startswith("Mxx=", token) || startswith("mxx=", token))
      {
         CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
         token += 4; // skip Mxx=
         mxx = atof(token);
         isMomentType = 1;
      }
      else if (startswith("Mxy=", token) || startswith("mxy=", token))
      {
         CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
         token += 4; // skip Mxy=
         mxy = atof(token);
         isMomentType = 1;
      }
      else if (startswith("Mxz=", token) || startswith("mxz=", token))
      {
         CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
         token += 4; // skip Mxz=
         mxz = atof(token);
         isMomentType = 1;
      }
      else if (startswith("Myy=", token) || startswith("myy=", token))
      {
         CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
         token += 4; // skip Myy=
         myy = atof(token);
         isMomentType = 1;
      }
      else if (startswith("Myz=", token) || startswith("myz=", token))
      {
         CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
         token += 4; // skip Myz=
         myz = atof(token);
         isMomentType = 1;
      }
      else if (startswith("Mzz=", token) || startswith("mzz=", token))
      {
         CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
         token += 4; // skip Mzz=
         mzz = atof(token);
         isMomentType = 1;
      }
      else if (startswith("Fz=", token) || startswith("fz=", token))
      {
         CHECK_INPUT(isMomentType != 1, err << pointAndMomentErr);
         token += 3; // skip Fz=
         fz = atof(token);
         isMomentType = 0;
      }
      else if (startswith("Fx=", token) || startswith("fx=", token))
      {
         CHECK_INPUT(isMomentType != 1, err << pointAndMomentErr);
         token += 3; // skip Fx=
         fx = atof(token);
         isMomentType = 0;
      }
      else if (startswith("Fy=", token) || startswith("fy=", token))
      {
         CHECK_INPUT(isMomentType != 1, err << pointAndMomentErr);
         token += 3; // skip Fy=
         fy = atof(token);
         isMomentType = 0;
      }
      else if (startswith("t0=", token))
      {
         token += 3; // skip t0=
         t0 = atof(token);
      }
      else if (startswith("freq=", token))
      {
         token += 5; // skip freq=
         freq = atof(token);
         CHECK_INPUT(freq > 0,
                     err << "source command: Frequency must be > 0");
      }
      else if (startswith("f0=", token))
      {
         CHECK_INPUT(isMomentType != 1,
                     err << "source command: Cannot set force amplitude for moment tensor terms");
         token += strlen("f0=");
         f0 = atof(token);
      }
      else if (startswith("type=",token))
      {
         token += 5;
         strncpy(formstring, token,100);
         if (!strcmp("Ricker",formstring))
            tDep = iRicker;
         else if (!strcmp("Gaussian",formstring))
            tDep = iGaussian;
         else if (!strcmp("Ramp",formstring))
            tDep = iRamp;
         else if (!strcmp("Triangle",formstring))
            tDep = iTriangle;
         else if (!strcmp("Sawtooth",formstring))
            tDep = iSawtooth;
         else if (!strcmp("SmoothWave",formstring))
            tDep = iSmoothWave;
         else if (!strcmp("Erf",formstring) || !strcmp("GaussianInt",formstring) )
            tDep = iErf;
         else if (!strcmp("VerySmoothBump",formstring))
            tDep = iVerySmoothBump;
         else if (!strcmp("RickerInt",formstring) )
            tDep = iRickerInt;
         else if (!strcmp("Brune",formstring) )
            tDep = iBrune;
         else if (!strcmp("BruneSmoothed",formstring) )
            tDep = iBruneSmoothed;
         else if (!strcmp("DBrune",formstring) )
            tDep = iDBrune;
         else if (!strcmp("GaussianWindow",formstring) )
            tDep = iGaussianWindow;
         else if (!strcmp("Liu",formstring) )
            tDep = iLiu;
         else if (!strcmp("Dirac",formstring) )
            tDep = iDirac;
         else if (!strcmp("C6SmoothBump",formstring) )
            tDep = iC6SmoothBump;
         else if (m_myrank == 0)
            cout << "unknown time function: " << formstring << endl <<
               " using default RickerInt function." << endl;
      }
      else
      {
         badOption("source", token);
      }
      token = strtok(NULL, " \t");
   }
   CHECK_INPUT(depthSet || zSet,
               err << "source command: depth, topodepth or z-coordinate must be specified");
   if (depthSet)
   {
      z = depth;
   }

   if (cartCoordSet)
   {
      float_sw4 xmin = 0.;
      float_sw4 ymin = 0.;
      float_sw4 zmin;
      // only check the z>zmin when we have topography. For a flat free surface, we will remove sources too
      // close or above the surface in the call to mGlobalUniqueSources[i]->correct_Z_level()
      if (m_topography_exists) // topography command must be read before the source command
         zmin = m_global_zmin;
      else
         zmin = 0;
      if ( (m_topography_exists && (x < xmin || x > m_global_xmax ||
                                    y < ymin || y > m_global_ymax )) ||
           (!m_topography_exists && (x < xmin || x > m_global_xmax ||
                                     y < ymin || y > m_global_ymax ||
                                     z < zmin || z > m_global_zmax)) )
      {
         stringstream sourceposerr;
         sourceposerr << endl
                      << "***************************************************" << endl
                      << " FATAL ERROR: Source positioned outside grid! " << endl
                      << endl
                      << " Source Type: " << formstring << endl
                      << " @ x=" << x << " y=" << y << " z=" << z << endl << endl;
         if ( x < xmin )
            sourceposerr << " x is " << xmin - x << " meters away from min x (" << xmin << ")" << endl;
         else if ( x > m_global_xmax)
            sourceposerr << " x is " << x - m_global_xmax << " meters away from max x (" << m_global_xmax << ")" << endl;
         if ( y < ymin )
            sourceposerr << " y is " << ymin - y << " meters away from min y (" << ymin << ")" << endl;
         else if ( y > m_global_ymax)
            sourceposerr << " y is " << y - m_global_ymax << " meters away from max y (" << m_global_ymax << ")" << endl;
         if ( z < zmin )
            sourceposerr << " z is " << zmin - z << " meters away from min z (" << zmin << ")" << endl;
         else if ( z > m_global_zmax)
            sourceposerr << " z is " << z - m_global_zmax << " meters away from max z (" << m_global_zmax << ")" << endl;
         sourceposerr << "***************************************************" << endl;
         if (m_myrank == 0)
            cout << sourceposerr.str();
// (aborts the run when a source lies outside the grid; continues the
// error branch begun on the previous source line)
         MPI_Abort(MPI_COMM_WORLD, 1);
      }
   }

   if (isMomentType)
   {
      // Remove amplitude variable
      mxx *= m0;
      mxy *= m0;
      mxz *= m0;
      myy *= m0;
      myz *= m0;
      mzz *= m0;
      // these have global location since they will be used by all processors
      sourcePtr = new Source(this, freq, t0, x, y, z, mxx, mxy, mxz, myy, myz, mzz,
                             tDep, formstring, topodepth, ncyc,
                             par, npar, ipar, nipar, false ); // false is correctStrengthForMu
      if (sourcePtr->ignore())
      {
         delete sourcePtr;
      }
      else
      {
         m_globalUniqueSources.push_back(sourcePtr);
      }
   }
   else // point forcing
   {
      // Remove amplitude variable
      fx *= f0;
      fy *= f0;
      fz *= f0;
      // global version (gets real coordinates)
      sourcePtr = new Source(this, freq, t0, x, y, z, fx, fy, fz,
                             tDep, formstring, topodepth, ncyc,
                             par, npar, ipar, nipar, false ); // false is correctStrengthForMu
      //...and add it to the list of forcing terms
      if (sourcePtr->ignore())
      {
         delete sourcePtr;
      }
      else
      {
         m_globalUniqueSources.push_back(sourcePtr);
      }
   }
}

//-----------------------------------------------------------------------
// Parses the "supergrid" command: damping-layer thickness (in grid
// points) and damping coefficient.  (Body completes on the next source
// line.)
void EW::processSuperGrid(char *buffer)
{
   char* token = strtok(buffer, " \t");
   token = strtok(NULL, " \t");
   int sg_thickness; // sg_transition;
   float_sw4 sg_coeff;
   bool thicknessSet=false, dampingCoeffSet=false; // , transitionSet=false
   while (token != NULL)
   {
      if (startswith("#", token) || startswith(" ", buffer))
         // Ignore commented lines and lines with just a space.
// (body of the skip-comment test begun on the previous source line)
         break;
      if (startswith("gp=", token)) // in number of grid sizes (different from WPP)
      {
         token += 3;
         sg_thickness = atoi(token);
         CHECK_INPUT(sg_thickness>0,
                     "The number of grid points in the supergrid damping layer must be positive, not: "<< sg_thickness);
         thicknessSet = true;
      }
      else if (startswith("dc=", token))
      {
         token += 3;
         sg_coeff = atof(token);
         CHECK_INPUT(sg_coeff>=0.,
                     "The supergrid damping coefficient must be non-negative, not: "<<sg_coeff);
         dampingCoeffSet=true;
      }
      else
      {
         badOption("supergrid", token);
      }
      token = strtok(NULL, " \t");
   } // end while token
   if (thicknessSet)
      m_sg_gp_thickness = sg_thickness;
   // Default damping coefficient depends on the damping order.
   if (dampingCoeffSet)
      m_supergrid_damping_coefficient = sg_coeff;
   else if( m_sg_damping_order == 4 )
      m_supergrid_damping_coefficient = 0.02;
   else if( m_sg_damping_order == 6 )
      m_supergrid_damping_coefficient = 0.005;
}

//-----------------------------------------------------------------------
// Parses the "developer" command (undocumented tuning options: CFL
// number, NaN checking, timing report, trace output, GPU block sizes,
// array ordering).
void EW::processDeveloper(char* buffer)
{
   char* token = strtok(buffer, " \t");
   CHECK_INPUT(strcmp("developer", token) == 0,
               "ERROR: not a developer line...: " << token);
   token = strtok(NULL, " \t");
   while (token != NULL) // while there are tokens in the string still
   {
      if (startswith("#", token) || startswith(" ", buffer))
         // Ignore commented lines and lines with just a space.
         break;
      if( startswith("cfl=",token) )
      {
         token += 4;
         float_sw4 cfl = atof(token);
         CHECK_INPUT( cfl > 0, "Error negative CFL number");
         // set_cflnumber( cfl );
         mCFL = cfl;
      }
      else if( startswith("checkfornan=",token) )
      {
         token += 12;
         m_checkfornan = strcmp(token,"1")==0 || strcmp(token,"on")==0 || strcmp(token,"yes")==0;
      }
      else if( startswith("reporttiming=",token) )
      {
         token += 13;
         m_output_detailed_timing = strcmp(token,"1")==0 || strcmp(token,"on")==0 || strcmp(token,"yes")==0;
      }
      else if( startswith("trace=",token) )
      {
         token += 6;
         m_save_trace = strcmp(token,"yes")==0 || strcmp(token,"1")==0 || strcmp(token,"on")==0;
      }
      else if( startswith("thblocki=",token) )
      {
         token += 9;
         m_gpu_blocksize[0] = atoi(token);
      }
      else if( startswith("thblockj=",token) )
      {
         token += 9;
         m_gpu_blocksize[1] = atoi(token);
      }
      else if( startswith("thblockk=",token) )
      {
         token += 9;
         m_gpu_blocksize[2] = atoi(token);
      }
      else if( startswith("corder=",token) )
      {
         token += 7;
         m_corder = strcmp(token,"yes")==0 || strcmp(token,"1")==0 || strcmp(token,"on")==0;
         Sarray::m_corder = m_corder;
      }
      else
      {
         badOption("developer", token);
      }
      token = strtok(NULL, " \t");
   }
}

//------------------------------------------------------------------------
// Parses the "block" (material block) command: material properties
// (vp, vs, rho, optional gradients and Q factors) inside an axis-aligned
// box.  (Body completes on the following source lines.)
void EW::processMaterialBlock( char* buffer )
{
   float_sw4 vpgrad=0.0, vsgrad=0.0, rhograd=0.0;
   bool x1set=false, x2set=false, y1set=false, y2set=false,
      z1set=false, z2set=false;
   float_sw4 x1=0.0, x2=0.0, y1=0.0, y2=0.0, z1=0.0, z2=0.0;
   // int i1=-1, i2=-1, j1=-1, j2=-1, k1=-1, k2=-1;
   string name = "Block";

   char* token = strtok(buffer, " \t");
   CHECK_INPUT(strcmp("block", token) == 0,
               "ERROR: material block can be set by a block line, not: " << token);
   string err = token;
   err += " Error: ";

   token = strtok(NULL, " \t");
   float_sw4 vp=-1, vs=-1, rho=-1, qp=-1, qs=-1, freq=1;
   bool absDepth=false;

   while (token != NULL) // while there are tokens in the string still
   {
      if (startswith("#", token) || startswith(" ", buffer))
         // Ignore commented lines and lines with just a space.
break; // the xygrad keywords must occur before the corresponding xy keywords if (startswith("rhograd=", token)) { token += 8; // skip rhograd= rhograd = atof(token); } else if (startswith("vpgrad=", token)) { token += 7; // skip vpgrad= vpgrad = atof(token); } else if (startswith("vsgrad=", token)) { token += 7; // skip vsgrad= vsgrad = atof(token); } else if (startswith("vp=", token) ) { token += 3; // skip vp= vp = atof(token); } else if (startswith("vs=", token) ) { token += 3; // skip vs= vs = atof(token); } else if (startswith("rho=", token)) { token += 4; // skip rho= rho = atof(token); } else if (startswith("r=", token)) // superseded by rho=, but keep for backward compatibility { token += 2; // skip r= rho = atof(token); } else if (startswith("Qs=", token) || startswith("qs=",token) ) { token += 3; // skip qs= qs = atof(token); } else if (startswith("Qp=", token) || startswith("qp=",token) ) { token += 3; // skip qp= qp = atof(token); } else if (startswith("absdepth=", token) ) { token += 9; // skip absdepth= absDepth = (bool) atoi(token); } else if (startswith("x1=", token)) { token += 3; // skip x1= x1 = atof(token); x1set = true; } else if (startswith("x2=", token)) { token += 3; // skip x2= x2 = atof(token); x2set = true; } else if (startswith("y1=", token)) { token += 3; // skip y1= y1 = atof(token); y1set = true; } else if (startswith("y2=", token)) { token += 3; // skip y2= y2 = atof(token); y2set = true; } else if (startswith("z1=", token)) { token += 3; // skip z1= z1 = atof(token); z1set = true; } else if (startswith("z2=", token)) { token += 3; // skip z2= z2 = atof(token); z2set = true; } else { badOption("block", token); } token = strtok(NULL, " \t"); } // End parsing... // Set up a block on the EW object. 
if (x1set) { CHECK_INPUT(x1 <= m_global_xmax, err << "x1 is greater than the maximum x, " << x1 << " > " << m_global_xmax); } else x1 = -m_global_xmax; //x1 = 0.; if (x2set) { CHECK_INPUT(x2 >= 0., err << "x2 is less than the minimum x, " << x2 << " < " << 0.); } else x2 = 2.*m_global_xmax;//x2 = m_global_xmax; CHECK_INPUT( x2 >= x1, " (x1..x2), upper bound is smaller than lower bound"); //-------------------------------------------------------- // Set j bounds, goes with Y in WPP //-------------------------------------------------------- if (y1set) { CHECK_INPUT(y1 <= m_global_ymax, err << "y1 is greater than the maximum y, " << y1 << " > " << m_global_ymax); } else y1 = -m_global_ymax;//y1 = 0.; if (y2set) { CHECK_INPUT(y2 >= 0., err << "y2 is less than the minimum y, " << y2 << " < " << 0.); } else y2 = 2.*m_global_ymax;//y2 = m_global_ymax; CHECK_INPUT( y2 >= y1, " (y1..y2), upper bound is smaller than lower bound"); if (z1set) { CHECK_INPUT(z1 <= m_global_zmax, err << "z1 is greater than the maximum z, " << z1 << " > " << m_global_zmax); } else z1 = m_global_zmin - (m_global_zmax-m_global_zmin); if (z2set) { CHECK_INPUT(topographyExists() || z2 >= 0., err << "z2 is less than the minimum z, " << z2 << " < " << 0.); } else z2 = m_global_zmax + (m_global_zmax-m_global_zmin); CHECK_INPUT( z2 >= z1, " (z1..z2), upper bound is smaller than lower bound"); if( getVerbosity() >=2 && m_myrank == 0 ) cout << name << " has bounds " << x1 << " " << x2 << " " << y1 << " " << y2 << " " << z1 << " " << z2 << endl; CHECK_INPUT( vs > 0 && vp > 0 && rho > 0 , "Error in block " << name << " vp vs rho are " << vp << " " << vs << " " << rho ); MaterialBlock* bl = new MaterialBlock( this ,rho, vs, vp, x1, x2, y1, y2, z1, z2, qs, qp, freq ); bl->set_gradients( rhograd, vsgrad, vpgrad ); bl->set_absoluteDepth( absDepth ); m_mtrlblocks.push_back(bl); } //----------------------------------------------------------------------- void EW::processReceiver(char* buffer ) { float_sw4 x=0.0, 
	     y=0.0, z=0.0;
   float_sw4 lat = 0.0, lon = 0.0, depth = 0.0;
   bool cartCoordSet = false, geoCoordSet = false;
   string fileName = "station";
   string staName = "station";
   bool staNameGiven=false;
   int writeEvery = 1000;
   bool topodepth = false;
   bool usgsformat = 0, sacformat=1; // default is to write sac files
   TimeSeries::receiverMode mode=TimeSeries::Displacement;
   char* token = strtok(buffer, " \t");
   bool nsew=false;
   // cerr << "******************** INSIDE process receiver *********************" << endl;
   CHECK_INPUT(strcmp("rec", token) == 0 || strcmp("sac", token) == 0, "ERROR: not a rec line...: " << token);
   token = strtok(NULL, " \t");
   string err = "RECEIVER Error: ";
   while (token != NULL)
   {
      // while there are tokens in the string still
      // cout << m_myRank << " token " << token <<"x"<<endl;
      if (startswith("#", token) || startswith(" ", buffer))
	 // Ignore commented lines and lines with just a space.
	 break;
      // Cartesian and geographic coordinates are mutually exclusive.
      if (startswith("x=", token))
      {
	 CHECK_INPUT(!geoCoordSet, err << "receiver command: Cannot set both a geographical (lat, lon) and a cartesian (x,y) coordinate");
	 token += 2; // skip x=
	 cartCoordSet = true;
	 x = atof(token);
	 CHECK_INPUT(x >= 0.0, "receiver command: x must be greater than or equal to 0, not " << x);
	 CHECK_INPUT(x <= m_global_xmax, "receiver command: x must be less than or equal to xmax, not " << x);
      }
      else if (startswith("y=", token))
      {
	 CHECK_INPUT(!geoCoordSet, err << "receiver command: Cannot set both a geographical (lat, lon) and a cartesian (x,y) coordinate");
	 token += 2; // skip y=
	 cartCoordSet = true;
	 y = atof(token);
	 CHECK_INPUT(y >= 0.0, "receiver command: y must be greater than or equal to 0, not " << y);
	 CHECK_INPUT(y <= m_global_ymax, "receiver command: y must be less than or equal to ymax, not " << y);
      }
      else if (startswith("lat=", token))
      {
	 CHECK_INPUT(!cartCoordSet, err << "receiver command: Cannot set both a geographical (lat, lon) and a cartesian (x,y) coordinate");
	 token += 4; // skip lat=
	 lat = atof(token);
	 CHECK_INPUT(lat >= -90.0,
		     "receiver command: lat must be greater than or equal to -90 degrees, not " << lat);
	 CHECK_INPUT(lat <= 90.0,
		     "receiver command: lat must be less than or equal to 90 degrees, not " << lat);
	 geoCoordSet = true;
      }
      else if (startswith("lon=", token))
      {
	 CHECK_INPUT(!cartCoordSet, err << "receiver command: Cannot set both a geographical (lat, lon) and a cartesian (x,y) coordinate");
	 token += 4; // skip lon=
	 lon = atof(token);
	 CHECK_INPUT(lon >= -180.0, "receiver command: lon must be greater or equal to -180 degrees, not " << lon);
	 CHECK_INPUT(lon <= 180.0, "receiver command: lon must be less than or equal to 180 degrees, not " << lon);
	 geoCoordSet = true;
      }
      else if (startswith("z=", token))
      {
	 token += 2; // skip z=
	 depth = z = atof(token);
	 topodepth = false; // absolute depth (below mean sea level)
	 CHECK_INPUT(z <= m_global_zmax, "receiver command: z must be less than or equal to zmax, not " << z);
      }
      else if (startswith("depth=", token))
      {
	 token += 6; // skip depth=
	 z = depth = atof(token);
	 topodepth = true; // by depth we here mean depth below topography
	 CHECK_INPUT(depth >= 0.0, err << "receiver command: depth must be greater than or equal to zero");
	 CHECK_INPUT(depth <= m_global_zmax, "receiver command: depth must be less than or equal to zmax, not " << depth);
      }
      else if (startswith("topodepth=", token))
      {
	 token += 10; // skip topodepth=
	 z = depth = atof(token);
	 topodepth = true; // by depth we here mean depth below topography
	 CHECK_INPUT(depth >= 0.0, err << "receiver command: depth must be greater than or equal to zero");
	 CHECK_INPUT(depth <= m_global_zmax, "receiver command: depth must be less than or equal to zmax, not " << depth);
      }
      else if(startswith("file=", token))
      {
	 token += 5; // skip file=
	 fileName = token;
      }
      else if (startswith("sta=", token))
      {
	 token += strlen("sta=");
	 staName = token;
	 staNameGiven=true;
      }
      else if( startswith("nsew=", token) )
      {
	 token += strlen("nsew=");
	 nsew = atoi(token) == 1;
      }
      else if (startswith("writeEvery=", token))
      {
	 token += strlen("writeEvery=");
	 writeEvery = atoi(token);
	 CHECK_INPUT(writeEvery >= 0,
		     err << "sac command: writeEvery must be set to a non-negative integer, not: " << token);
      }
      else if( startswith("usgsformat=", token) )
      {
	 token += strlen("usgsformat=");
	 usgsformat = atoi(token);
      }
      else if( startswith("sacformat=", token) )
      {
	 token += strlen("sacformat=");
	 sacformat = atoi(token);
      }
      else if( startswith("variables=", token) )
      {
	 token += strlen("variables=");
	 // Map the requested quantity onto the TimeSeries recording mode.
	 if( strcmp("displacement",token)==0 )
	 {
	    mode = TimeSeries::Displacement;
	 }
	 else if( strcmp("velocity",token)==0 )
	 {
	    mode = TimeSeries::Velocity;
	 }
	 else if( strcmp("div",token)==0 )
	 {
	    mode = TimeSeries::Div;
	 }
	 else if( strcmp("curl",token)==0 )
	 {
	    mode = TimeSeries::Curl;
	 }
	 else if( strcmp("strains",token)==0 )
	 {
	    mode = TimeSeries::Strains;
	 }
	 else if( strcmp("displacementgradient",token)==0 )
	 {
	    mode = TimeSeries::DisplacementGradient;
	 }
	 else
	 {
	    if (m_myrank == 0 )
	       cout << "receiver command: variables=" << token << " not understood" << endl
		    << "using default mode (displacement)" << endl << endl;
	    mode = TimeSeries::Displacement;
	 }
      }
      else
      {
	 badOption("receiver", token);
      }
      token = strtok(NULL, " \t");
   }
   if (geoCoordSet)
   {
      // Convert (lon,lat) to cartesian (x,y) in the computational frame.
      computeCartesianCoord(x, y, lon, lat);
      // check if (x,y) is within the computational domain
   }
   if (!staNameGiven)
      staName = fileName;
   bool inCurvilinear=false;
   // we are in or above the curvilinear grid
   if ( topographyExists() && z < m_zmin[mNumberOfCartesianGrids-1])
   {
      inCurvilinear = true;
   }
   // check if (x,y,z) is not in the global bounding box
   if ( !( (inCurvilinear || z >= 0) && x>=0 && x<=m_global_xmax && y>=0 && y<=m_global_ymax))
   {
      // The location of this station was outside the domain, so don't include it in the global list
      if (m_myrank == 0 && getVerbosity() > 0)
      {
	 stringstream receivererr;
	 receivererr << endl
		     << "***************************************************" << endl
		     << " WARNING: RECEIVER positioned outside grid!" << endl;
	 receivererr << " No RECEIVER file will be generated for file = " << fileName << endl;
	 if (geoCoordSet)
	 {
	    receivererr << " @ lon=" << lon << " lat=" << lat << " depth=" << depth << endl << endl;
	 }
	 else
	 {
	    receivererr << " @ x=" << x << " y=" << y << " z=" << z << endl << endl;
	 }
	 receivererr << "***************************************************" << endl;
	 cerr << receivererr.str();
	 cerr.flush();
      }
   }
   else
   {
      // Ownership of the new TimeSeries is transferred to m_GlobalTimeSeries.
      TimeSeries *ts_ptr = new TimeSeries(this, fileName, staName, mode, sacformat, usgsformat,
					  x, y, depth, topodepth, writeEvery, !nsew);
      // include the receiver in the global list
      m_GlobalTimeSeries.push_back(ts_ptr);
   }
}

//-----------------------------------------------------------------------
void EW::defineDimensionsGXY( )
{
   //
   // Defines the number of grids and dimensions in the x- and y-directions,
   // It also defines the parallel decomposition, which is only made in the x-y directions.
   //
   // The z-direction requires topography to be known before computing dimensions.
   // x- and y-dimensions must be defined before the topography is read.
   // Hence, we have to 1. Define x and y dimensions,
   //                   2. Read the topography
   //                   3. Define z dimensions.
   if (mVerbose && m_myrank == 0 )
      printf("defineDimensionsGXY: #ghost points=%i, #parallel padding points=%i\n", m_ghost_points, m_ppadding);
   // Grids are enumerated from bottom to the top, i.e, g=0 is at the bottom, and g=mNumberOfGrids-1 is at the top.
   // Note, this is opposite to the z-coordinate which is largest at the bottom and smallest at the top.
   if( m_nz_base > 1 && !m_topography_exists )
   {
      // Flat
      mNumberOfCartesianGrids = mNumberOfGrids = 1;
      m_is_curvilinear.push_back(false);
   }
   else if( m_nz_base > 1 && m_topography_exists )
   {
      // Curvilinear: one cartesian grid below one curvilinear grid on top.
      mNumberOfGrids = 2;
      mNumberOfCartesianGrids = 1;
      m_is_curvilinear.push_back(false);
      m_is_curvilinear.push_back(true);
   }
   else if( m_myrank == 0 )
      cout << "ERROR in defineDimensionsXY, domain could not be defined" << endl;
   // Compute parallel decomposition
   int nx_finest_w_ghost = m_nx_base+2*m_ghost_points;
   int ny_finest_w_ghost = m_ny_base+2*m_ghost_points;
   proc_decompose_2d( nx_finest_w_ghost, ny_finest_w_ghost, m_nprocs, m_nprocs_2d );
   int is_periodic[2]={0,0};
   // Build a 2D (non-periodic) process grid; ranks may be reordered.
   MPI_Cart_create( MPI_COMM_WORLD, 2, m_nprocs_2d, is_periodic, true, &m_cartesian_communicator );
   // int my_proc_coords[2];
   MPI_Cart_get( m_cartesian_communicator, 2, m_nprocs_2d, is_periodic, m_myrank_2d );
   // Neighbors: 0,1 in the i-direction; 2,3 in the j-direction.
   MPI_Cart_shift( m_cartesian_communicator, 0, 1, m_neighbor, m_neighbor+1 );
   MPI_Cart_shift( m_cartesian_communicator, 1, 1, m_neighbor+2, m_neighbor+3 );
   if( m_myrank == 0 && mVerbose >= 3)
   {
      cout << " Grid distributed on " << m_nprocs << " processors " << endl;
      cout << " Finest grid size " << nx_finest_w_ghost << " x " << ny_finest_w_ghost << endl;
      cout << " Processor array " << m_nprocs_2d[0] << " x " << m_nprocs_2d[1] << endl;
   }
   int ifirst, ilast, jfirst, jlast;
   // 1D split of each direction over the process grid, then shift so that
   // interior indices start at 1 and ghost points get indices <= 0.
   decomp1d( nx_finest_w_ghost, m_myrank_2d[0], m_nprocs_2d[0], ifirst, ilast );
   decomp1d( ny_finest_w_ghost, m_myrank_2d[1], m_nprocs_2d[1], jfirst, jlast );
   ifirst -= m_ghost_points;
   ilast -= m_ghost_points;
   jfirst -= m_ghost_points;
   jlast -= m_ghost_points;
   // Define dimension arrays
   mGridSize.resize(mNumberOfGrids);
   m_global_nx.resize(mNumberOfGrids);
   m_global_ny.resize(mNumberOfGrids);
   m_iStart.resize(mNumberOfGrids);
   m_iEnd.resize(mNumberOfGrids);
   m_jStart.resize(mNumberOfGrids);
   m_jEnd.resize(mNumberOfGrids);
   m_iStartInt.resize(mNumberOfGrids);
   m_iEndInt.resize(mNumberOfGrids);
   m_jStartInt.resize(mNumberOfGrids);
   m_jEndInt.resize(mNumberOfGrids);
   // Compute decomposition of x-y dimensions.
   for( int g = 0 ; g < mNumberOfGrids; g++ )
   {
      mGridSize[g] = m_h_base;
      m_global_nx[g] = m_nx_base;
      m_global_ny[g] = m_ny_base;
      // save the local index bounds
      m_iStart[g] = ifirst;
      m_iEnd[g] = ilast;
      m_jStart[g] = jfirst;
      m_jEnd[g] = jlast;
      // local index bounds for interior points (= no ghost or parallel padding points)
      if (ifirst == 1-m_ghost_points)
	 m_iStartInt[g] = 1;
      else
	 m_iStartInt[g] = ifirst+m_ppadding;
      if (ilast == m_global_nx[g] + m_ghost_points)
	 m_iEndInt[g] = m_global_nx[g];
      else
	 m_iEndInt[g] = ilast - m_ppadding;
      if (jfirst == 1-m_ghost_points)
	 m_jStartInt[g] = 1;
      else
	 m_jStartInt[g] = jfirst+m_ppadding;
      if (jlast == m_global_ny[g] + m_ghost_points)
	 m_jEndInt[g] = m_global_ny[g];
      else
	 m_jEndInt[g] = jlast - m_ppadding;
   }
   // Set up arrays of arrays.
   // Materials
   mMu.resize(mNumberOfGrids);
   mLambda.resize(mNumberOfGrids);
   mRho.resize(mNumberOfGrids);
   // Super-grid data
   m_sg_dc_x.resize(mNumberOfGrids);
   m_sg_dc_y.resize(mNumberOfGrids);
   m_sg_dc_z.resize(mNumberOfGrids);
   m_sg_str_x.resize(mNumberOfGrids);
   m_sg_str_y.resize(mNumberOfGrids);
   m_sg_str_z.resize(mNumberOfGrids);
   m_sg_corner_x.resize(mNumberOfGrids);
   m_sg_corner_y.resize(mNumberOfGrids);
   m_sg_corner_z.resize(mNumberOfGrids);
   // Boundary information
   m_onesided.resize(mNumberOfGrids);
   m_bcType.resize(mNumberOfGrids);
   // Default values: every side starts as a processor boundary; the real
   // physical boundary conditions are assigned later.
   for( int g= 0 ;g < mNumberOfGrids ; g++ )
   {
      m_onesided[g] = new int[6];
      m_bcType[g] = new boundaryConditionType[6];
      for( int side =0 ; side < 6 ; side++ )
      {
	 m_onesided[g][side] = 0;
	 m_bcType[g][side] = bProcessor;
      }
   }
}

//-----------------------------------------------------------------------
void EW::defineDimensionsZ()
{
   // Assumes that topography is known, and computes the z-direction
   // dimensions of arrays.
   // Compute average elevation
   float_sw4 topo_avg=0;
   if( m_topography_exists )
   {
      float_sw4 tzmin, tzmax;
      compute_minmax_topography(tzmin,tzmax);
      topo_avg = 0.5*(tzmin+tzmax);
   }
   m_zmin.resize(mNumberOfGrids);
   m_global_nz.resize(mNumberOfGrids);
   // Define m_zmin and m_global_nk.
   // Adjust m_global_zmin and m_global_zmax, if necessary.
   if( m_nz_base > 1 && !m_topography_exists )
   {
      // Flat
      m_global_nz[0] = m_nz_base;
      m_zmin[0] = 0;
   }
   else if( m_nz_base > 1 && m_topography_exists )
   {
      // Curvilinear: snap zmax so an integer number of spacings h fits
      // between the top of the cartesian grid and the bottom.
      int nz = static_cast<int>(1 + round((m_global_zmax-m_topo_zmax)/m_h_base));
      m_global_zmax = m_topo_zmax+(nz-1)*m_h_base;
      m_global_nz[0] = nz;
      m_zmin[0] = m_topo_zmax;
      m_global_nz[1] = static_cast<int>(1 + round((m_topo_zmax - topo_avg)/m_h_base));
      // Sentinel: the curvilinear grid has no fixed zmin (follows topography).
      m_zmin[1] = 1e38;
   }
   else if( m_myrank == 0 )
      cout << "ERROR in defineDimensionsZ, elastic domain could not be defined" << endl;
   // Define local z-dimension arrays; k is not decomposed over MPI tasks.
   m_kStart.resize(mNumberOfGrids);
   m_kEnd.resize(mNumberOfGrids);
   m_kStartInt.resize(mNumberOfGrids);
   m_kEndInt.resize(mNumberOfGrids);
   for( int g = 0 ; g < mNumberOfGrids; g++ )
   {
      m_kStart[g] = 1-m_ghost_points;
      m_kEnd[g] = m_global_nz[g] + m_ghost_points;
      m_kStartInt[g] = 1;
      m_kEndInt[g] = m_global_nz[g];
   }
   if (mVerbose >= 1 && m_myrank == 0)
      cout << "Extent of the computational domain xmax=" << m_global_xmax << " ymax=" << m_global_ymax
	   << " zmin = " << m_global_zmin << " zmax=" << m_global_zmax << endl;
}

//-----------------------------------------------------------------------
// Allocate the 2D topography arrays on the top (finest) grid's local patch.
void EW::allocateTopoArrays()
{
   if( m_topography_exists )
   {
      int ifirst = m_iStart[mNumberOfGrids-1];
      int ilast = m_iEnd[mNumberOfGrids-1];
      int jfirst = m_jStart[mNumberOfGrids-1];
      int jlast = m_jEnd[mNumberOfGrids-1];
      // Two versions of the topography:
      mTopo.define(ifirst,ilast,jfirst,jlast,1,1); // true topography/bathymetry, read directly
      // smoothed version of true topography, with an extended number (4 instead of 2 ) of ghost points.
      m_ext_ghost_points = 2;
      mTopoGridExt.define(ifirst-m_ext_ghost_points,ilast+m_ext_ghost_points,
			  jfirst-m_ext_ghost_points,jlast+m_ext_ghost_points,1,1);
   }
}

//-----------------------------------------------------------------------
// Allocate per-grid material arrays, supergrid damping vectors, and (on the
// curvilinear grid) the grid/metric arrays. Arrays are touched immediately
// to place pages for OpenMP first-touch memory locality.
void EW::allocateArrays()
{
   for( int g=0 ; g < mNumberOfGrids ; g++ )
   {
      int ifirst = m_iStart[g];
      int ilast = m_iEnd[g];
      int jfirst = m_jStart[g];
      int jlast = m_jEnd[g];
      int kfirst = m_kStart[g];
      int klast = m_kEnd[g];
      // Material data
      mMu[g].define(ifirst,ilast,jfirst,jlast,kfirst,klast);
      mRho[g].define(ifirst,ilast,jfirst,jlast,kfirst,klast);
      mLambda[g].define(ifirst,ilast,jfirst,jlast,kfirst,klast);
      // initialize the material coefficients to -1
      mMu[g].set_to_minusOne();
      mRho[g].set_to_minusOne();
      mLambda[g].set_to_minusOne();
      // Supergrid arrays: 1D per direction, sized for the local patch.
      m_sg_dc_x[g] = new float_sw4[ilast-ifirst+1];
      m_sg_dc_y[g] = new float_sw4[jlast-jfirst+1];
      m_sg_dc_z[g] = new float_sw4[klast-kfirst+1];
      m_sg_str_x[g] = new float_sw4[ilast-ifirst+1];
      m_sg_str_y[g] = new float_sw4[jlast-jfirst+1];
      m_sg_str_z[g] = new float_sw4[klast-kfirst+1];
      m_sg_corner_x[g] = new float_sw4[ilast-ifirst+1];
      m_sg_corner_y[g] = new float_sw4[jlast-jfirst+1];
      m_sg_corner_z[g] = new float_sw4[klast-kfirst+1];
      if( m_topography_exists && g == mNumberOfGrids-1 )
      {
	 // Grid and metric
	 mJ.define(ifirst,ilast,jfirst,jlast,kfirst,klast);
	 mX.define(ifirst,ilast,jfirst,jlast,kfirst,klast);
	 mY.define(ifirst,ilast,jfirst,jlast,kfirst,klast);
	 mZ.define(ifirst,ilast,jfirst,jlast,kfirst,klast);
	 mMetric.define(4,ifirst,ilast,jfirst,jlast,kfirst,klast);
	 // Initialization, to touch memory in case OpenMP is in use
	 mJ.set_to_zero();
	 mX.set_to_zero();
	 mY.set_to_zero();
	 mZ.set_to_zero();
	 mMetric.set_to_zero();
      }
   }
}

//-----------------------------------------------------------------------
// Print, on rank 0 only, the global size of each grid and the total number
// of grid points (ghost points excluded).
void EW::printGridSizes() const
{
   if (m_myrank == 0)
   {
      int nx, ny, nz;
      float_sw4 nTot=0.;
      printf("\nGlobal grid sizes (without ghost points)\n");
      printf("Grid h Nx Ny Nz Points\n");
      for (int g = 0; g <
	   mNumberOfGrids; g++)
      {
	 nx = m_global_nx[g];
	 ny = m_global_ny[g];
	 nz = m_kEnd[g] - m_ghost_points;
	 // Accumulate in 64-bit to avoid int overflow for large grids.
	 nTot += ((long long int)nx)*ny*nz;
	 printf("%4i %9g %9i %9i %9i %12lld\n", g, mGridSize[g], nx, ny, nz, ((long long int)nx)*ny*nz);
      }
      printf("Total number of grid points (without ghost points): %g\n\n", nTot);
   }
}

//-----------------------------------------------------------------------
// Read and process the input file in two passes:
//   pass 1: commands needed to define the grids (grid, developer,
//           topography, fileio), then build grids/arrays;
//   pass 2: all remaining commands (time, source, receivers, blocks, ...).
// Returns false if the file cannot be opened or no grid command is found.
bool EW::parseInputFile( const string& filename )
{
   char buffer[256];
   bool foundGrid = false;
   MPI_Barrier(MPI_COMM_WORLD);
   ifstream inputFile;
   inputFile.open(filename.c_str());
   if (!inputFile.is_open())
   {
      if (m_myrank == 0)
	 cerr << endl << "ERROR: Failure opening input file: " << filename << endl;
      return false;
   }
   while (!inputFile.eof())
   {
      inputFile.getline(buffer, 256);
      if( startswith("grid", buffer) )
      {
	 foundGrid = true;
	 processGrid(buffer);
      }
      // Need process developer before setupMPICommunication, because of array ordering m_corder
      else if(startswith("developer", buffer))
	 processDeveloper(buffer);
      else if (startswith("topography", buffer))
	 processTopography(buffer);
      else if( startswith("fileio",buffer))
	 processFileIO(buffer);
   }
   if (!foundGrid)
      if (m_myrank == 0)
      {
	 cerr << "ERROR: No grid found in input file: " << filename << endl;
	 return false;
      }
   defineDimensionsGXY();
   if( m_topography_exists )
   {
      allocateTopoArrays();
      if( m_topoInputStyle == EW::GaussianHill )
	 buildGaussianHillTopography(m_GaussianAmp, m_GaussianLx, m_GaussianLy, m_GaussianXc, m_GaussianYc);
   }
   defineDimensionsZ();
   setupMPICommunications();
   allocateArrays();
   if( m_topography_exists )
   {
      generate_grid();
      setup_metric();
   }
   // output grid size info
   printGridSizes();
   // set default boundary conditions,
   default_bcs();
   inputFile.clear();
   inputFile.seekg(0, ios::beg); // reset file pointer to the beginning of the input file
   while (!inputFile.eof())
   {
      inputFile.getline(buffer, 256);
      if (strlen(buffer) > 0) // empty lines produce this
      {
	 // Skip comments and the commands already handled in pass 1.
	 if (startswith("#", buffer) || startswith("grid", buffer) || startswith("developer", buffer)
	     || startswith("topography", buffer) || startswith("fileio", buffer)
	     || startswith("\n", buffer) || startswith("\r", buffer) )
	 {
	 }
	 else if(startswith("time", buffer))
	    processTime(buffer);
	 else if( startswith("source",buffer))
	    processSource(buffer);
	 else if( startswith("supergrid",buffer))
	    processSuperGrid(buffer);
	 else if(startswith("testpointsource", buffer))
	    processTestPointSource(buffer);
	 // else if(startswith("developer", buffer))
	 //    processDeveloper(buffer);
	 else if( startswith("checkpoint",buffer))
	    processCheckPoint(buffer);
	 else if( startswith("restart",buffer))
	    processRestart(buffer);
	 else if( startswith("rec",buffer))
	    processReceiver(buffer);
	 else if( startswith("block",buffer))
	    processMaterialBlock(buffer);
	 else if (!inputFile.eof() && m_myrank == 0)
	 {
	    cout << "*** Ignoring command: '" << buffer << "'" << endl;
	 }
      }
   }
   inputFile.close();
   if( m_myrank == 0 )
      cout << "Done reading input file " << endl;
   MPI_Barrier(MPI_COMM_WORLD);
   return true;
}

//-----------------------------------------------------------------------
// One-time setup after input parsing and before time stepping: materials,
// GPU initialization, SBP operator coefficients, I/O, supergrid, sources
// and check point files.
void EW::setupRun()
{
   // Assign values to material data arrays mRho,mMu,mLambda
   setup_materials();
   // Check if any GPUs are available
   find_cuda_device( );
   m_cuobj->initialize_gpu(m_myrank);
   // setup coefficients for SBP operators
   setupSBPCoeff();
   // Check that f.d. operators fit inside the domains
   check_dimensions();
   // Initialize IO
   create_output_directory( );
   // Set up supergrid
   setup_supergrid( );
   assign_supergrid_damping_arrays();
   // Copy material to GPU
   copy_material_to_device();
   // B.C. data structures
   assign_local_bcs();
   setup_boundary_arrays();
   // Time step
   computeDT( );
   // Set up sources:
   for( int s=0 ; s < m_globalUniqueSources.size() ; s++)
   {
      m_globalUniqueSources[s]->set_grid_point_sources4( this, m_point_sources );
   }
   // Sorting sources on grid index will allow more efficient parallel code with multi-core
   sort_grid_point_sources();
   if( m_myrank == 0 && m_globalUniqueSources.size() > 0 )
      cout << "setup of sources done" << endl;
   if( m_cuobj->has_gpu() )
   {
      copy_point_sources_to_gpu( );
      init_point_sourcesCU( );
   }
   // Setup I/O in check points
   if( m_restart_check_point != CheckPoint::nil )
      m_restart_check_point->setup_sizes();
   for( int c = 0 ; c < m_check_points.size() ; c++ )
      m_check_points[c]->setup_sizes();
   if( m_myrank == 0 && (m_restart_check_point != CheckPoint::nil || m_check_points.size() > 0) )
      cout << "setup of check point file done" << endl;
}

//-----------------------------------------------------------------------
// Main time stepping loop (predictor-corrector), with GPU (HIP) and CPU
// code paths. On entry U and Um hold two solution time levels; on exit
// they hold the final levels.
void EW::timesteploop( vector<Sarray>& U, vector<Sarray>& Um )
{
   // input: U,Um,mMu,mLambda,mRho,
   // local arrays: F, Up, Lu, Uacc
   vector<Sarray> F, Lu, Uacc, Up;
   // Pointer to Sarray on device, not sure if std::vector is available.
   Sarray* dev_F, *dev_Um, *dev_U, *dev_Up, *dev_metric, *dev_j;
   float_sw4* gridsize_dev;
   // Do all timing in double, time differences have too much cancellation for float.
   double time_start_solve = MPI_Wtime();
   bool saveerror = false;
   // Define local arrays (3 solution components per grid point)
   F.resize(mNumberOfGrids);
   Lu.resize(mNumberOfGrids);
   Uacc.resize(mNumberOfGrids);
   Up.resize(mNumberOfGrids);
   U.resize(mNumberOfGrids);
   Um.resize(mNumberOfGrids);
   for( int g=0 ; g < mNumberOfGrids ; g++ )
   {
      int ifirst = m_iStart[g], ilast = m_iEnd[g];
      int jfirst = m_jStart[g], jlast = m_jEnd[g];
      int kfirst = m_kStart[g], klast = m_kEnd[g];
      F[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
      Lu[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
      Uacc[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
      Up[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
      U[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
      Um[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
   }
   // Set up boundary data array
   //vector<float_sw4**> BCForcing;
   BCForcing.resize(mNumberOfGrids);
   for( int g = 0; g <mNumberOfGrids; g++ )
   {
      BCForcing[g] = new float_sw4*[6];
      for (int side=0; side < 6; side++)
      {
	 BCForcing[g][side]=NULL;
	 // Forcing storage is only needed on sides with physical conditions.
	 if (m_bcType[g][side] == bStressFree || m_bcType[g][side] == bDirichlet || m_bcType[g][side] == bSuperGrid)
	 {
	    BCForcing[g][side] = new float_sw4[3*m_NumberOfBCPoints[g][side]];
	 }
      }
   }
   // Initial data, touch all memory even in
   // arrays that do not need values, in order
   // to initialize OpenMP with good memory access
   for( int g=0 ; g < mNumberOfGrids ; g++ )
   {
      U[g].set_value(0.0);
      Um[g].set_value(0.0);
      F[g].set_value(0.0);
      Up[g].set_value(0.0);
      Uacc[g].set_value(0.0);
      Lu[g].set_value(0.0);
   }
   int beginCycle = 0;
   float_sw4 t = mTstart;
   if( m_restart_check_point != CheckPoint::nil )
   {
      // Restarting: read two time levels, then restore ghost/boundary data
      // for both so the first step is consistent.
      m_restart_check_point->read_checkpoint( t, beginCycle, Um, U );
      for(int g=0 ; g < mNumberOfGrids ; g++ )
      {
	 communicate_array( U[g], g );
	 communicate_array( Um[g], g );
      }
      cartesian_bc_forcing( t, BCForcing, m_globalUniqueSources );
      enforceBC( U, mMu, mLambda, t, BCForcing );
      cartesian_bc_forcing( t-mDt, BCForcing, m_globalUniqueSources );
      enforceBC( Um, mMu, mLambda, t-mDt, BCForcing );
   }
   beginCycle++;
   copy_bcforcing_arrays_to_device();
   copy_bctype_arrays_to_device();
   copy_bndrywindow_arrays_to_device();
   double time_measure[20];
   double time_sum[20]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
   for (int ts=0; ts<m_GlobalTimeSeries.size(); ts++)
      m_GlobalTimeSeries[ts]->allocateRecordingArrays( mNumberOfTimeSteps+1, mTstart, mDt);
   if (m_myrank == 0)
   {
      cout << "Running on " << m_nprocs << " MPI tasks" << endl;
   }
#ifdef SW4_OPENMP
#pragma omp parallel
   {
      if( omp_get_thread_num() == 0 && m_myrank == 0 )
      {
	 int nth=omp_get_num_threads();
	 cout << "Using OpenMP with " << nth << " thread";
	 if( nth > 1 )
	    cout << "s";
	 cout << " per MPI task" << endl;
      }
   }
#endif
   // Device-side arrays of Sarray descriptors (one per grid).
   hipMalloc( (void**)&dev_F, sizeof(Sarray)*mNumberOfGrids);
   hipMalloc( (void**)&dev_Um, sizeof(Sarray)*mNumberOfGrids);
   hipMalloc( (void**)&dev_U, sizeof(Sarray)*mNumberOfGrids);
   hipMalloc( (void**)&dev_Up, sizeof(Sarray)*mNumberOfGrids);
   hipMalloc( (void**)&dev_metric, sizeof(Sarray));
   hipMalloc( (void**)&dev_j, sizeof(Sarray));
   hipMalloc( (void**)&gridsize_dev, sizeof(float_sw4)*mNumberOfGrids);
   for( int g=0 ; g < mNumberOfGrids ; g++ )
   {
      Lu[g].copy_to_device(m_cuobj);
      Up[g].copy_to_device(m_cuobj);
      Um[g].copy_to_device(m_cuobj);
      U[g].copy_to_device(m_cuobj);
      Uacc[g].copy_to_device(m_cuobj);
      F[g].copy_to_device(m_cuobj);
      // Page-lock host memory for faster async transfers.
      F[g].page_lock(m_cuobj);
      U[g].page_lock(m_cuobj);
      Um[g].page_lock(m_cuobj);
      Up[g].page_lock(m_cuobj);
   }
   hipMemcpy( dev_F, &F[0], mNumberOfGrids*sizeof(Sarray), hipMemcpyHostToDevice );
   hipMemcpy( dev_Um, &Um[0], mNumberOfGrids*sizeof(Sarray), hipMemcpyHostToDevice );
   hipMemcpy( dev_U, &U[0], mNumberOfGrids*sizeof(Sarray), hipMemcpyHostToDevice );
   hipMemcpy( dev_Up, &Up[0], mNumberOfGrids*sizeof(Sarray), hipMemcpyHostToDevice );
   hipMemcpy( dev_metric, &mMetric, sizeof(Sarray), hipMemcpyHostToDevice );
   hipMemcpy( dev_j, &mJ, sizeof(Sarray), hipMemcpyHostToDevice );
   hipMemcpy( gridsize_dev, &mGridSize[0], sizeof(float_sw4)*mNumberOfGrids, hipMemcpyHostToDevice );
   // save initial data on receiver records
   vector<float_sw4> uRec;
   for (int ts=0; ts<m_GlobalTimeSeries.size(); ts++)
   {
      // can't compute a 2nd order accurate time derivative at this point
      // therefore, don't record anything related to velocities for the initial data
      if (m_GlobalTimeSeries[ts]->getMode() != TimeSeries::Velocity && m_GlobalTimeSeries[ts]->myPoint())
      {
	 int i0 = m_GlobalTimeSeries[ts]->m_i0;
	 int j0 = m_GlobalTimeSeries[ts]->m_j0;
	 int k0 = m_GlobalTimeSeries[ts]->m_k0;
	 int grid0 = m_GlobalTimeSeries[ts]->m_grid0;
	 extractRecordData(m_GlobalTimeSeries[ts]->getMode(), i0, j0, k0, grid0, uRec, Um, U);
	 m_GlobalTimeSeries[ts]->recordData(uRec);
      }
   }
   // Build TimeSeries help data structure for GPU
   int* i0dev, *j0dev, *k0dev, *g0dev;
   int* modedev;
   float_sw4** urec_dev;  // array of pointers on device pointing to device memory
   float_sw4** urec_host; // array of pointers on host pointing to host memory
   float_sw4** urec_hdev; // array of pointers on host pointing to device memory
   int nvals=0, ntloc=0;
   allocateTimeSeriesOnDeviceCU( nvals, ntloc, i0dev, j0dev, k0dev, g0dev, modedev, urec_dev, urec_host, urec_hdev );
   if( m_myrank == 0 )
      cout << "starting at time " << t << " at cycle " << beginCycle << endl;
   // NOTE(review): trdata is only allocated when m_save_trace is set; all
   // later uses appear to be guarded by the same flag — verify.
   double* trdata;
   if( m_save_trace )
   {
      trdata = new double[12*(mNumberOfTimeSteps+1)];
      MPI_Barrier(m_cartesian_communicator);
   }
   // Set up the array for data communication
   setup_device_communication_array();
   // Begin time stepping loop
   for( int currentTimeStep = beginCycle; currentTimeStep <= mNumberOfTimeSteps; currentTimeStep++ )
   {
      time_measure[0] = MPI_Wtime();
      // all types of forcing...
      if( m_cuobj->has_gpu() )
	 ForceCU( t, dev_F, false, 0 );
      else
	 Force( t, F, m_point_sources, false );
      if( m_checkfornan )
      {
	 check_for_nan_GPU( F, 1, "F" );
	 check_for_nan_GPU( U, 1, "U" );
      }
      time_measure[1] = MPI_Wtime();
      // evaluate right hand side
      if( m_cuobj->has_gpu() )
      {
	 // evalRHSCU( U, mMu, mLambda, Lu, 0 ); // save Lu in composite grid 'Lu'
	 // RHS + predictor in the rest (stream 0)
	 RHSPredCU_boundary (Up, U, Um, mMu, mLambda, mRho, F, 0);
	 // Wait for stream 0 to complete
	 m_cuobj->sync_stream(0);
	 RHSPredCU_center (Up, U, Um, mMu, mLambda, mRho, F, 1);
      }
      else
	 evalRHS( U, mMu, mLambda, Lu ); // save Lu in composite grid 'Lu'
      if( m_checkfornan )
	 check_for_nan_GPU( Lu, 1, "Lu pred. " );
      // take predictor step, store in Up
      m_cuobj->sync_stream( 0 );
      //predictor is merged into RHSPredCU_*
      if( ! m_cuobj->has_gpu() )
	 evalPredictor( Up, U, Um, mRho, Lu, F );
      time_measure[2] = MPI_Wtime();
      // communicate across processor boundaries
      if( m_cuobj->has_gpu() )
      {
	 for(int g=0 ; g < mNumberOfGrids ; g++ )
	 {
	    //communicate_arrayCU( Up[g], g, 0);
	    pack_HaloArrayCU_X (Up[g], g, 0);
	    communicate_arrayCU_X( Up[g], g, 0);
	    unpack_HaloArrayCU_X (Up[g], g, 0);
	    pack_HaloArrayCU_Y (Up[g], g, 0);
	    communicate_arrayCU_Y( Up[g], g, 0);
	    unpack_HaloArrayCU_Y (Up[g], g, 0);
	 }
	 hipDeviceSynchronize();
      }
      else
      {
	 for(int g=0 ; g < mNumberOfGrids ; g++ )
	    communicate_array( Up[g], g );
      }
      time_measure[3] = MPI_Wtime();
      // calculate boundary forcing at time t+mDt
      if( m_cuobj->has_gpu() )
      {
	 cartesian_bc_forcingCU( t+mDt, BCForcing, m_globalUniqueSources,0);
	 enforceBCCU( Up, mMu, mLambda, t+mDt, BCForcing, 0);
      }
      else
      {
	 cartesian_bc_forcing( t+mDt, BCForcing, m_globalUniqueSources );
	 enforceBC( Up, mMu, mLambda, t+mDt, BCForcing );
      }
      if( m_checkfornan )
	 check_for_nan( Up, 1, "U pred. " );
      time_measure[4] = MPI_Wtime();
      // Corrector
      if( m_cuobj->has_gpu() )
      {
	 ForceCU( t, dev_F, true, 0 );
	 hipDeviceSynchronize();
      }
      else
	 Force( t, F, m_point_sources, true );
      time_measure[5] = MPI_Wtime();
      if( m_cuobj->has_gpu() )
	 evalDpDmInTimeCU( Up, U, Um, Uacc, 0 ); // store result in Uacc
      else
	 evalDpDmInTime( Up, U, Um, Uacc ); // store result in Uacc
      if( m_checkfornan )
	 check_for_nan_GPU( Uacc, 1, "uacc " );
      if( m_cuobj->has_gpu() )
      {
	 // RHS + corrector in the free surface and halos (stream 0)
	 RHSCorrCU_boundary (Up, Uacc, mMu, mLambda, mRho, F, 0);
	 // Add super grid damping terms in the free surface and halos (stream 0)
	 addSuperGridDampingCU_upper_boundary (Up, U, Um, mRho, 0);
	 // Wait for stream 0 to complete
	 m_cuobj->sync_stream(0);
	 RHSCorrCU_center (Up, Uacc, mMu, mLambda, mRho, F, 1);
      }
      else
	 evalRHS( Uacc, mMu, mLambda, Lu );
      if( m_checkfornan )
	 check_for_nan_GPU( Lu, 1, "L(uacc) " );
      //corrector is merged into RHSCorrCU_*
      if( !m_cuobj->has_gpu() )
	 evalCorrector( Up, mRho, Lu, F );
      time_measure[6] = MPI_Wtime();
      // add in super-grid damping terms
      if ( m_use_supergrid )
      {
	 if( m_cuobj->has_gpu() )
	 {
	    // addSuperGridDampingCU( Up, U, Um, mRho, 0 );
	    // Add super grid damping terms in the rest of the cube (stream 1)
	    addSuperGridDampingCU_center (Up, U, Um, mRho, 1);
	    // Add super grid damping terms in the rest of the cube (stream 1)
	    m_cuobj->sync_stream(1);
	 }
	 else
	    addSuperGridDamping( Up, U, Um, mRho );
      }
      time_measure[7] = MPI_Wtime();
      // also check out EW::update_all_boundaries
      // communicate across processor boundaries
      if( m_cuobj->has_gpu() )
	 for(int g=0 ; g < mNumberOfGrids ; g++ )
	 {
	    pack_HaloArrayCU_X (Up[g], g, 0);
	    communicate_arrayCU_X( Up[g], g, 0 );
	    unpack_HaloArrayCU_X (Up[g], g, 0);
	    pack_HaloArrayCU_Y (Up[g], g, 0);
	    communicate_arrayCU_Y( Up[g], g, 0 );
	    unpack_HaloArrayCU_Y (Up[g], g, 0);
	 }
      else
	 for(int g=0 ; g < mNumberOfGrids ; g++ )
	    communicate_array( Up[g], g );
      time_measure[8] = MPI_Wtime();
      // calculate boundary forcing at time t+mDt (do we really need to call this fcn again???)
      if( m_cuobj->has_gpu() )
      {
	 cartesian_bc_forcingCU( t+mDt, BCForcing, m_globalUniqueSources, 0 );
	 enforceBCCU( Up, mMu, mLambda, t+mDt, BCForcing, 0 );
      }
      else
      {
	 cartesian_bc_forcing( t+mDt, BCForcing, m_globalUniqueSources );
	 enforceBC( Up, mMu, mLambda, t+mDt, BCForcing );
      }
      if( m_checkfornan )
	 check_for_nan( Up, 1, "Up" );
      // increment time
      t += mDt;
      time_measure[9] = MPI_Wtime();
      // periodically, print time stepping info to stdout
      printTime( currentTimeStep, t, currentTimeStep == mNumberOfTimeSteps );
      // Images have to be written before the solution arrays are cycled, because both Up and Um are needed
      // to compute a centered time derivative
      // m_cuobj->sync_stream(0);
      double time_chkpt, time_chkpt_tmp;
      bool wrote=false;
      time_chkpt=MPI_Wtime();
      for( int c=0 ; c < m_check_points.size() ; c++ )
	 if( m_check_points[c]->timeToWrite( t, currentTimeStep, mDt) )
	 {
	    for( int g=0 ; g < mNumberOfGrids ; g++ )
	    {
	       U[g].copy_from_device(m_cuobj,true,0);
	       Up[g].copy_from_device(m_cuobj,true,1);
	    }
	    hipDeviceSynchronize();
	    m_check_points[c]->write_checkpoint( t, currentTimeStep, U, Up );
	    wrote=true;
	 }
      if( wrote )
      {
	 time_chkpt_tmp =MPI_Wtime()-time_chkpt;
	 MPI_Allreduce( &time_chkpt_tmp, &time_chkpt, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
	 if( m_myrank == 0 )
	    cout << "Cpu time to write check point file " << time_chkpt << " seconds " << endl;
      }
      // save the current solution on receiver records (time-derivative require Up and Um for a 2nd order
      // approximation, so do this before cycling the arrays)
      if( m_cuobj->has_gpu() )
      {
	 if( ntloc > 0 )
	 {
	    extractRecordDataCU( ntloc, modedev, i0dev, j0dev, k0dev, g0dev, urec_dev,
				 dev_Um, dev_Up, mDt, gridsize_dev, dev_metric, dev_j, 0,
				 nvals, urec_host[0], urec_hdev[0] );
	    // Note: extractRecordDataCU performs hipMemcpy of dev data to host, no explicit synchronization needed.
	    int tsnr=0;
	    for( int ts=0 ; ts < m_GlobalTimeSeries.size() ; ts++ )
	       if( m_GlobalTimeSeries[ts]->myPoint() )
		  m_GlobalTimeSeries[ts]->recordData(urec_host[tsnr++]);
	 }
      }
      else
      {
	 for (int ts=0; ts<m_GlobalTimeSeries.size(); ts++)
	 {
	    if (m_GlobalTimeSeries[ts]->myPoint())
	    {
	       int i0 = m_GlobalTimeSeries[ts]->m_i0;
	       int j0 = m_GlobalTimeSeries[ts]->m_j0;
	       int k0 = m_GlobalTimeSeries[ts]->m_k0;
	       int grid0 = m_GlobalTimeSeries[ts]->m_grid0;
	       //
	       // note that the solution on the new time step is in Up
	       // also note that all quantities related to velocities lag by one time step; they are not
	       // saved before the time stepping loop started
	       extractRecordData(m_GlobalTimeSeries[ts]->getMode(), i0, j0, k0, grid0, uRec, Um, Up);
	       m_GlobalTimeSeries[ts]->recordData(uRec);
	    }
	 }
      }
      // // Energy evaluation, requires all three time levels present, do before cycle arrays.
      // if( m_energy_test )
      //    compute_energy( mDt, currentTimeStep == mNumberOfTimeSteps, Um, U, Up, currentTimeStep );
      // cycle the solution arrays
      cycleSolutionArrays(Um, U, Up, dev_Um, dev_U, dev_Up );
      // time_measure[8] = MPI_Wtime();
      time_measure[10] = MPI_Wtime();
      // evaluate error for some test cases
      // if (m_lamb_test || m_point_source_test || m_rayleigh_wave_test )
      if ( m_point_source_test && saveerror )
      {
	 float_sw4 errInf=0, errL2=0, solInf=0; //, solL2=0;
	 exactSol( t, Up, m_globalUniqueSources ); // store exact solution in Up
	 // // if (m_lamb_test)
	 // //   normOfSurfaceDifference( Up, U, errInf, errL2, solInf, solL2, a_Sources);
	 normOfDifference( Up, U, errInf, errL2, solInf, m_globalUniqueSources );
	 if ( m_myrank == 0 )
	    cout << t << " " << errInf << " " << errL2 << " " << solInf << endl;
      }
      // time_measure[9] = MPI_Wtime();
      time_measure[11] = MPI_Wtime();
      //
      // See if it is time to write a restart file
      //
      // if (mRestartDumpInterval > 0 && currentTimeStep % mRestartDumpInterval == 0)
      // //	 serialize(currentTimeStep, U, Um);
      // Accumulate per-phase timings; skip the first step (warm-up).
      if( currentTimeStep > 1 )
      {
	 time_sum[0] += time_measure[1]-time_measure[0] + time_measure[5]-time_measure[4]; // F
	 time_sum[1] += time_measure[2]-time_measure[1] + time_measure[6]-time_measure[5]; // RHS
	 time_sum[2] += time_measure[3]-time_measure[2] + time_measure[8]-time_measure[7]; // bc comm.
	 time_sum[3] += time_measure[4]-time_measure[3] + time_measure[9]-time_measure[8]; // bc phys.
	 time_sum[4] += time_measure[7]-time_measure[6]; // super grid damping
	 time_sum[5] += time_measure[10]-time_measure[9]; // print outs
	 time_sum[6] += time_measure[11]-time_measure[10]; // compute exact solution
	 time_sum[7] += time_measure[11]-time_measure[0]; // total measured
      }
      if( m_save_trace )
	 for( int s = 0 ; s < 12 ; s++ )
	    trdata[s+12*(currentTimeStep-beginCycle)]= time_measure[s];
   } // end time stepping loop
   double time_end_solve = MPI_Wtime();
   print_execution_time( time_start_solve, time_end_solve, "solver phase" );
   if( m_output_detailed_timing )
      print_execution_times( time_sum );
   if ( m_point_source_test )
   {
      // Final error report against the exact point-source solution.
      if( m_cuobj->has_gpu() )
	 for( int g=0; g < mNumberOfGrids ; g++ )
	    U[g].copy_from_device(m_cuobj,true,0);
      float_sw4 errInf=0, errL2=0, solInf=0;//, solL2=0;
      exactSol( t, Up, m_globalUniqueSources ); // store exact solution in Up
      // // if (m_lamb_test)
      // //   normOfSurfaceDifference( Up, U, errInf, errL2, solInf, solL2, a_Sources);
      normOfDifference( Up, U, errInf, errL2, solInf, m_globalUniqueSources );
      if ( m_myrank == 0 )
      {
	 cout << "Errors at time " << t << " Linf = " << errInf << " L2 = " << errL2
	      << " norm of solution = " << solInf << endl;
	 string fname = mPath+"PointSourceErr.txt";
	 ofstream esave(fname.c_str());
	 esave.precision(12);
	 esave << t << " " << errInf << " " << errL2 << " " << solInf << endl;
	 esave.close();
      }
   }
   for (int ts=0; ts<m_GlobalTimeSeries.size(); ts++)
      m_GlobalTimeSeries[ts]->writeFile();
   for( int g= 0 ; g < mNumberOfGrids ; g++ )
   {
      F[g].page_unlock(m_cuobj);
      U[g].page_unlock(m_cuobj);
      Um[g].page_unlock(m_cuobj);
      Up[g].page_unlock(m_cuobj);
   }
   m_cuobj->reset_gpu();
   if( m_save_trace )
   {
      char fname[255];
snprintf(fname,255,"%s/trfile%04d.bin",mPath.c_str(),m_myrank);
      // Raw binary trace dump: header of two ints (record count = 12, number of
      // steps), followed by the timing data itself.
      int fd = open(fname, O_WRONLY|O_TRUNC|O_CREAT, 0660);
      int twelve=12;
      int nsteps= mNumberOfTimeSteps-beginCycle+1;
      size_t nr=write(fd,&twelve,sizeof(int));
      nr=write(fd,&nsteps,sizeof(int));
      nr=write(fd,trdata,sizeof(double)*twelve*nsteps);
      close(fd);
   }
}

//-----------------------------------------------------------------------
bool EW::proc_decompose_2d( int ni, int nj, int nproc, int proc_max[2] )
{
   // This routine determines a decomposition of nproc processors into
   // a 2D processor array proc_max[0] x proc_max[1], which gives minimal
   // communication boundary for a grid with ni x nj points.
   // Returns true if a valid decomposition was found, false otherwise.
   float_sw4 fmin = ni+nj;
   bool first = true;
   // Upper bound on ranks per dimension: each rank must own at least
   // m_ppadding points in each direction.
   int p1max = ni/m_ppadding;
   int p2max = nj/m_ppadding;
   // Try every factorization nproc = p1*p2 and keep the one whose subdomains
   // are closest to square.
   for( int p1 = 1 ; p1 <= nproc; p1++)
      if( nproc%p1 == 0 )
      {
         int p2 = nproc/p1;
         if( p1 <= p1max && p2 <= p2max )
         {
            // try to make each subdomain as square as possible
            float_sw4 f = fabs((float_sw4)(ni)/p1 - (float_sw4)(nj)/p2);
            if( f < fmin || first )
            {
               fmin = f;
               proc_max[0] = p1;
               proc_max[1] = p2;
               first= false;
            }
         }
      }
   return !first;
}

//-----------------------------------------------------------------------
void EW::decomp1d( int nglobal, int myid, int nproc, int& s, int& e )
//
// Decompose index space 1 <= i <= nglobal into nproc blocks, with an
// overlap of 2*m_ppadding points between neighboring blocks.
// Returns start and end indices (s, e) for block nr. myid,
// where 0 <= myid <= nproc-1.
//
{
   int olap = 2*m_ppadding;
   // nlocal points per rank; the first 'deficit' ranks get one extra point.
   int nlocal = (nglobal + (nproc-1)*olap ) / nproc;
   int deficit = (nglobal + (nproc-1)*olap ) % nproc;
   if( myid < deficit )
      s = myid*(nlocal-olap) + myid+1;
   else
      s = myid*(nlocal-olap) + deficit+1;
   if (myid < deficit)
      nlocal = nlocal + 1;
   e = s + nlocal - 1;
}

//-----------------------------------------------------------------------
// Build and commit the MPI derived datatypes used for halo exchange.
// m_send_typeN[2*g] is the x-direction (i-strided) type and
// m_send_typeN[2*g+1] the y-direction type on grid g, for arrays with
// N = 1, 3 or 4 components per grid point.
void EW::setupMPICommunications()
{
   if (mVerbose >= 1 && m_myrank == 0 )
      cout << "***inside setupMPICommunications***"<< endl;
   // Define MPI datatypes for communication across processor boundaries
   m_send_type1.resize(2*mNumberOfGrids);
   m_send_type3.resize(2*mNumberOfGrids);
   m_send_type4.resize(2*mNumberOfGrids);
   // m_send_type21.resize(2*mNumberOfGrids);
   for( int g= 0 ; g < mNumberOfGrids ; g++ )
   {
      // int ni = mU[g].m_ni, nj=mU[g].m_nj, nk=mU[g].m_nk;
      // Local (per-rank) array dimensions, including ghost/padding points.
      int ni = m_iEnd[g] - m_iStart[g] + 1;
      int nj = m_jEnd[g] - m_jStart[g] + 1;
      int nk = m_kEnd[g] - m_kStart[g] + 1;
      MPI_Type_vector( nj*nk, m_ppadding, ni, m_mpifloat, &m_send_type1[2*g] );
      MPI_Type_vector( nk, m_ppadding*ni, ni*nj, m_mpifloat, &m_send_type1[2*g+1] );
      if( m_corder )
      {
         // Component-major storage: the component index is the slowest
         // dimension, so multiply the block count by the number of components.
         MPI_Type_vector( 3*nj*nk, m_ppadding, ni, m_mpifloat, &m_send_type3[2*g] );
         MPI_Type_vector( 3*nk, m_ppadding*ni, ni*nj, m_mpifloat, &m_send_type3[2*g+1] );
         MPI_Type_vector( 4*nj*nk, m_ppadding, ni, m_mpifloat, &m_send_type4[2*g] );
         MPI_Type_vector( 4*nk, m_ppadding*ni, ni*nj, m_mpifloat, &m_send_type4[2*g+1] );
      }
      else
      {
         // Point-major storage: components are interleaved per grid point,
         // so scale the block length and stride instead.
         MPI_Type_vector( nj*nk, 3*m_ppadding, 3*ni, m_mpifloat, &m_send_type3[2*g] );
         MPI_Type_vector( nk, 3*m_ppadding*ni, 3*ni*nj, m_mpifloat, &m_send_type3[2*g+1] );
         MPI_Type_vector( nj*nk, 4*m_ppadding, 4*ni, m_mpifloat, &m_send_type4[2*g] );
         MPI_Type_vector( nk, 4*m_ppadding*ni, 4*ni*nj, m_mpifloat, &m_send_type4[2*g+1] );
      }
      MPI_Type_commit( &m_send_type1[2*g] );
      MPI_Type_commit( &m_send_type1[2*g+1] );
      MPI_Type_commit( &m_send_type3[2*g] );
      MPI_Type_commit( &m_send_type3[2*g+1] );
      MPI_Type_commit( &m_send_type4[2*g] );
      MPI_Type_commit( &m_send_type4[2*g+1] );
   }
}

//-----------------------------------------------------------------------
// Scan all grids of a_U for NaNs. Returns true if any were found; with
// verbose==1, also prints the location of the first NaN on each grid.
bool EW::check_for_nan( vector<Sarray>& a_U, int verbose, string name )
{
   bool retval = false;
   for( int g=0 ; g<mNumberOfGrids; g++ )
   {
      size_t nn=a_U[g].count_nans();
      retval = retval || nn > 0;
      if( nn > 0 && verbose == 1 )
      {
         int cnan, inan, jnan, knan;
         a_U[g].count_nans(cnan,inan,jnan,knan);
         cout << "grid " << g << " array " << name << " found " << nn << " nans. First nan at " <<
            cnan << " " << inan << " " << jnan << " " << knan << endl;
      }
   }
   return retval;
}

//-----------------------------------------------------------------------
// Rotate the three time levels (Um <- U <- Up <- old Um) by swapping the
// underlying data pointers -- no data is copied. Device-side pointers and
// the dev_* array handles are rotated the same way when a GPU is in use.
void EW::cycleSolutionArrays(vector<Sarray> & a_Um, vector<Sarray> & a_U, vector<Sarray> & a_Up,
                             Sarray*& dev_Um, Sarray*& dev_U, Sarray*& dev_Up )
{
   for (int g=0; g<mNumberOfGrids; g++)
   {
      float_sw4 *tmp = a_Um[g].c_ptr();
      a_Um[g].reference(a_U[g].c_ptr());
      a_U[g].reference(a_Up[g].c_ptr());
      a_Up[g].reference(tmp);
      if( m_cuobj->has_gpu() )
      {
         tmp = a_Um[g].dev_ptr();
         a_Um[g].reference_dev( a_U[g].dev_ptr());
         a_U[g].reference_dev( a_Up[g].dev_ptr());
         a_Up[g].reference_dev(tmp );
      }
   }
   Sarray* tmp = dev_Um;
   dev_Um = dev_U;
   dev_U = dev_Up;
   dev_Up = tmp;
}

//-----------------------------------------------------------------------
// Evaluate the point-source forcing arrays a_F at time a_t. With tt==true
// the second time derivative of the source time function is used
// (getFxyztt), otherwise the time function itself (getFxyz).
// Sources are processed in groups given by m_identsources; each group r
// covers sources m_identsources[r] .. m_identsources[r+1]-1 that share the
// same grid point, so accumulation into fptr is race-free per iteration.
// NOTE(review): the loop bound m_identsources.size()-1 underflows (size_t)
// if m_identsources is empty -- confirm callers guarantee >= 1 entry.
void EW::Force(float_sw4 a_t, vector<Sarray> & a_F, vector<GridPointSource*> point_sources, bool tt )
{
   for( int g =0 ; g < mNumberOfGrids ; g++ )
      a_F[g].set_to_zero();
#pragma omp parallel for
   for( int r=0 ; r<m_identsources.size()-1 ; r++ )
   {
      int s0 = m_identsources[r];
      int g = point_sources[s0]->m_grid;
      int i = point_sources[s0]->m_i0;
      int j = point_sources[s0]->m_j0;
      int k = point_sources[s0]->m_k0;
      size_t ind1 = a_F[g].index(1,i,j,k);
      size_t oc = a_F[g].m_offc;
      float_sw4* fptr =a_F[g].c_ptr();
      for( int s=m_identsources[r]; s< m_identsources[r+1] ; s++ )
      {
         float_sw4 fxyz[3];
         if( tt )
            point_sources[s]->getFxyztt(a_t,fxyz);
         else
            point_sources[s]->getFxyz(a_t,fxyz);
         fptr[ind1] += fxyz[0];
         fptr[ind1+oc]
+= fxyz[1];
         fptr[ind1+2*oc] += fxyz[2];
      }
   }
}

//---------------------------------------------------------------------------
// Predictor step of the two-step time integrator; delegates the per-grid
// update to the external kernel predfort, with dt2 = mDt^2.
void EW::evalPredictor(vector<Sarray> & a_Up, vector<Sarray> & a_U, vector<Sarray> & a_Um,
                       vector<Sarray>& a_Rho, vector<Sarray> & a_Lu, vector<Sarray> & a_F )
{
   float_sw4 dt2 = mDt*mDt;
   for( int g=0 ; g<mNumberOfGrids; g++ )
   {
      predfort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
                a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Lu[g].c_ptr(),
                a_F[g].c_ptr(), a_Rho[g].c_ptr(), dt2 );
   }
}

//---------------------------------------------------------------------------
// Corrector step (4th-order accuracy term); delegates to corrfort with
// dt4 = mDt^4.
void EW::evalCorrector(vector<Sarray> & a_Up, vector<Sarray>& a_Rho, vector<Sarray> & a_Lu,
                       vector<Sarray> & a_F )
{
   float_sw4 dt4 = mDt*mDt*mDt*mDt;
   for( int g=0 ; g<mNumberOfGrids; g++ )
   {
      corrfort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
                a_Up[g].c_ptr(), a_Lu[g].c_ptr(), a_F[g].c_ptr(), a_Rho[g].c_ptr(), dt4 );
   }
}

//---------------------------------------------------------------------------
// Second divided difference in time, (Up - 2U + Um)/dt^2, stored in a_Uacc;
// delegates to dpdmtfort with dt2i = 1/mDt^2.
void EW::evalDpDmInTime(vector<Sarray> & a_Up, vector<Sarray> & a_U, vector<Sarray> & a_Um,
                        vector<Sarray> & a_Uacc )
{
   float_sw4 dt2i = 1./(mDt*mDt);
   for(int g=0 ; g<mNumberOfGrids; g++ )
   {
      dpdmtfort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
                 a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Uacc[g].c_ptr(), dt2i );
   }
}

//-----------------------------------------------------------------------
// Evaluate the spatial (elastic) operator of a_U into a_Uacc:
// rhs4sg / rhs4sg_rev on the Cartesian grids, and rhs4sgcurv /
// rhs4sgcurv_rev on the topmost (curvilinear) grid when topography exists.
// The *_rev variants are for component-major (m_corder) storage.
void EW::evalRHS(vector<Sarray> & a_U, vector<Sarray>& a_Mu, vector<Sarray>& a_Lambda,
                 vector<Sarray> & a_Uacc )
{
   for(int g=0 ; g<mNumberOfCartesianGrids; g++ )
   {
      if( m_corder )
         rhs4sg_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                     m_kStart[g], m_kEnd[g], m_global_nz[g], m_onesided[g], m_acof, m_bope,
                     m_ghcof, a_Uacc[g].c_ptr(), a_U[g].c_ptr(), a_Mu[g].c_ptr(),
                     a_Lambda[g].c_ptr(), mGridSize[g],
                     m_sg_str_x[g], m_sg_str_y[g], m_sg_str_z[g] );
      else
         rhs4sg( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                 m_kStart[g], m_kEnd[g], m_global_nz[g], m_onesided[g], m_acof, m_bope,
                 m_ghcof, a_Uacc[g].c_ptr(), a_U[g].c_ptr(), a_Mu[g].c_ptr(),
                 a_Lambda[g].c_ptr(), mGridSize[g],
                 m_sg_str_x[g], m_sg_str_y[g], m_sg_str_z[g] );
#ifdef DEBUG_CUDA
      printf("params = %d, %d, %d, %d, %d, %d \n %f, %f, %f, %f \n %f, %f, %f, %f \n %d \n",
             m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
             (a_Uacc[g].c_ptr())[1], (a_U[g].c_ptr())[1], (a_Mu[g].c_ptr())[1],
             (a_Lambda[g].c_ptr())[1], mGridSize[g],
             m_sg_str_x[g][1], m_sg_str_y[g][1], m_sg_str_z[g][1], m_ghost_points);
      printf("onesided[%d](4,5) = %d, %d\n", g, m_onesided[g][4], m_onesided[g][5]);
#endif
   }
   if( m_topography_exists )
   {
      // The curvilinear grid is always the last one; it additionally needs
      // the metric (mMetric) and Jacobian (mJ) arrays.
      int g=mNumberOfGrids-1;
      if( m_corder )
         rhs4sgcurv_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                         m_kStart[g], m_kEnd[g], a_U[g].c_ptr(), a_Mu[g].c_ptr(),
                         a_Lambda[g].c_ptr(), mMetric.c_ptr(), mJ.c_ptr(), a_Uacc[g].c_ptr(),
                         m_onesided[g], m_acof, m_bope, m_ghcof,
                         m_sg_str_x[g], m_sg_str_y[g] );
      else
         rhs4sgcurv( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                     m_kStart[g], m_kEnd[g], a_U[g].c_ptr(), a_Mu[g].c_ptr(),
                     a_Lambda[g].c_ptr(), mMetric.c_ptr(), mJ.c_ptr(), a_Uacc[g].c_ptr(),
                     m_onesided[g], m_acof, m_bope, m_ghcof,
                     m_sg_str_x[g], m_sg_str_y[g] );
   }
}

//-----------------------------------------------------------------------
// Exchange halo (padding) layers of u with the four 2D-decomposition
// neighbors: first the x-direction, then the y-direction (so corners are
// transported in two hops). Uses the derived datatypes created in
// setupMPICommunications, selected by the number of components u.m_nc.
void EW::communicate_array( Sarray& u, int grid )
{
   REQUIRE2( u.m_nc == 1 || u.m_nc == 3 || u.m_nc == 4,
             "Communicate array, only implemented for nc=1,3, and 4 " <<
             " nc = " << u.m_nc );
   int ie = u.m_ie, ib=u.m_ib, je=u.m_je, jb=u.m_jb, kb=u.m_kb;//,ke=u.m_ke;
   MPI_Status status;
   if( u.m_nc == 1 )
   {
      int xtag1 = 345;
      int xtag2 = 346;
      int ytag1 = 347;
      int ytag2 = 348;
      // X-direction communication
      MPI_Sendrecv( &u(ie-(2*m_ppadding-1),jb,kb), 1, m_send_type1[2*grid], m_neighbor[1], xtag1,
                    &u(ib,jb,kb), 1, m_send_type1[2*grid], m_neighbor[0], xtag1,
                    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(ib+m_ppadding,jb,kb), 1, m_send_type1[2*grid], m_neighbor[0], xtag2,
&u(ie-(m_ppadding-1),jb,kb), 1, m_send_type1[2*grid], m_neighbor[1], xtag2,
                    m_cartesian_communicator, &status );
      // Y-direction communication
      MPI_Sendrecv( &u(ib,je-(2*m_ppadding-1),kb), 1, m_send_type1[2*grid+1], m_neighbor[3], ytag1,
                    &u(ib,jb,kb), 1, m_send_type1[2*grid+1], m_neighbor[2], ytag1,
                    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(ib,jb+m_ppadding,kb), 1, m_send_type1[2*grid+1], m_neighbor[2], ytag2,
                    &u(ib,je-(m_ppadding-1),kb), 1, m_send_type1[2*grid+1], m_neighbor[3], ytag2,
                    m_cartesian_communicator, &status );
   }
   else if( u.m_nc == 3 )
   {
      // Same exchange pattern for 3-component arrays (uses m_send_type3 and
      // the extra leading component index in operator()).
      int xtag1 = 345;
      int xtag2 = 346;
      int ytag1 = 347;
      int ytag2 = 348;
      // X-direction communication
      MPI_Sendrecv( &u(1,ie-(2*m_ppadding-1),jb,kb), 1, m_send_type3[2*grid], m_neighbor[1], xtag1,
                    &u(1,ib,jb,kb), 1, m_send_type3[2*grid], m_neighbor[0], xtag1,
                    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(1,ib+m_ppadding,jb,kb), 1, m_send_type3[2*grid], m_neighbor[0], xtag2,
                    &u(1,ie-(m_ppadding-1),jb,kb), 1, m_send_type3[2*grid], m_neighbor[1], xtag2,
                    m_cartesian_communicator, &status );
      // Y-direction communication
      MPI_Sendrecv( &u(1,ib,je-(2*m_ppadding-1),kb), 1, m_send_type3[2*grid+1], m_neighbor[3], ytag1,
                    &u(1,ib,jb,kb), 1, m_send_type3[2*grid+1], m_neighbor[2], ytag1,
                    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(1,ib,jb+m_ppadding,kb), 1, m_send_type3[2*grid+1], m_neighbor[2], ytag2,
                    &u(1,ib,je-(m_ppadding-1),kb), 1, m_send_type3[2*grid+1], m_neighbor[3], ytag2,
                    m_cartesian_communicator, &status );
   }
   else if( u.m_nc == 4 )
   {
      // Same exchange pattern for 4-component arrays (m_send_type4).
      int xtag1 = 345;
      int xtag2 = 346;
      int ytag1 = 347;
      int ytag2 = 348;
      // X-direction communication
      MPI_Sendrecv( &u(1,ie-(2*m_ppadding-1),jb,kb), 1, m_send_type4[2*grid], m_neighbor[1], xtag1,
                    &u(1,ib,jb,kb), 1, m_send_type4[2*grid], m_neighbor[0], xtag1,
                    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(1,ib+m_ppadding,jb,kb), 1, m_send_type4[2*grid], m_neighbor[0], xtag2,
                    &u(1,ie-(m_ppadding-1),jb,kb), 1, m_send_type4[2*grid], m_neighbor[1], xtag2,
                    m_cartesian_communicator, &status );
      // Y-direction communication
      MPI_Sendrecv( &u(1,ib,je-(2*m_ppadding-1),kb), 1, m_send_type4[2*grid+1], m_neighbor[3], ytag1,
                    &u(1,ib,jb,kb), 1, m_send_type4[2*grid+1], m_neighbor[2], ytag1,
                    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(1,ib,jb+m_ppadding,kb), 1, m_send_type4[2*grid+1], m_neighbor[2], ytag2,
                    &u(1,ib,je-(m_ppadding-1),kb), 1, m_send_type4[2*grid+1], m_neighbor[3], ytag2,
                    m_cartesian_communicator, &status );
   }
}

//-----------------------------------------------------------------------
void EW::cartesian_bc_forcing( float_sw4 t, vector<float_sw4**> & a_BCForcing,
                               vector<Source*>& a_sources )
// assign the boundary forcing arrays a_BCForcing[g][side] at time t.
// In the point-source test, Dirichlet sides get the exact solution evaluated
// on their boundary window; all other sides (and all sides outside the test)
// are zeroed.
{
   for(int g=0 ; g<mNumberOfGrids; g++ )
   {
      if( m_point_source_test )
      {
         for( int side=0 ; side < 6 ; side++ )
            if( m_bcType[g][side] == bDirichlet )
               get_exact_point_source( a_BCForcing[g][side], t, g, *a_sources[0],
                                       &m_BndryWindow[g][6*side] );
            else
               for (int q=0; q<3*m_NumberOfBCPoints[g][side]; q++)
                  a_BCForcing[g][side][q] = 0;
      }
      else
      {
         // no boundary forcing
         // we can do the same loop for all types of bc.
// For bParallel boundaries, numberOfBCPoints=0
         for( int side=0 ; side < 6 ; side++ )
            for( int q=0 ; q < 3*m_NumberOfBCPoints[g][side] ; q++ )
               a_BCForcing[g][side][q] = 0.;
      }
   }
}

//-----------------------------------------------------------------------
// Allocate and fill m_BndryWindow (6 index-window entries per side, 6 sides
// per grid) and m_NumberOfBCPoints (points per side) for every grid.
// Sides with no forcing keep the sentinel window [999,-999,...] and 0 points.
void EW::setup_boundary_arrays( )
{
   m_BndryWindow.resize(mNumberOfGrids);
   m_NumberOfBCPoints.resize(mNumberOfGrids);
   for (int g=0; g<mNumberOfGrids; g++ )
   {
      m_BndryWindow[g] = new int[36];
      m_NumberOfBCPoints[g] = new int[6];
      for(int side=0; side<6 ; side++ )
      {
         m_NumberOfBCPoints[g][side] = 0;
         // Initialize to an empty window: lower bounds (even slots) above
         // upper bounds (odd slots).
         for (int qq=0; qq<6; qq+=2) // 0, 2, 4
            m_BndryWindow[g][qq + side*6]= 999;
         for (int qq=1; qq<6; qq+=2) // 1, 3, 5
            m_BndryWindow[g][qq + side*6]= -999;
      }
      int wind[6];
      for(int side=0; side<6 ; side++ )
      {
         if (m_bcType[g][side] == bStressFree || m_bcType[g][side] == bDirichlet ||
             m_bcType[g][side] == bSuperGrid || m_bcType[g][side] == bPeriodic)
         {
            // modify the window for stress free bc to only hold one plane
            if (m_bcType[g][side] == bStressFree)
            {
               side_plane( g, side, wind, 1 );
               // when calling side_plane with nGhost=1, you get the outermost grid plane
               // for Free surface conditions, we apply the forcing on the boundary
               // itself, i.e., just inside the ghost points
               // add/subtract the ghost point offset
               if( side == 0 )
               {
                  wind[0] += m_ghost_points;
                  wind[1] = wind[0];
               }
               else if( side == 1 )
               {
                  wind[0] -= m_ghost_points;
                  wind[1] = wind[0];
               }
               else if( side == 2 )
               {
                  wind[2] += m_ghost_points;
                  wind[3] = wind[2];
               }
               else if( side == 3 )
               {
                  wind[2] -= m_ghost_points;
                  wind[3] = wind[2];
               }
               else if( side == 4 )
               {
                  wind[4] += m_ghost_points;
                  wind[5] = wind[4];
               }
               else
               {
                  wind[4] -= m_ghost_points;
                  wind[5] = wind[4];
               }
            }
            else
            // for Dirichlet, super grid, and periodic conditions, we
            // apply the forcing directly on the ghost points
            {
               side_plane( g, side, wind, m_ghost_points );
            }
            int npts = (wind[5]-wind[4]+1)*
               (wind[3]-wind[2]+1)*
               (wind[1]-wind[0]+1);
            for (int qq=0; qq<6; qq++)
               m_BndryWindow[g][qq+side*6]=wind[qq];
            m_NumberOfBCPoints[g][side] = npts;
         }
      }
   }
}

//-----------------------------------------------------------------------
// Fill wind[6] with the index window of one boundary side of grid g:
// the full local extent in the two tangential directions and a slab of
// nGhost planes in the normal direction. Sides are ordered
// 0/1 = low/high i, 2/3 = low/high j, 4/5 = low/high k.
void EW::side_plane( int g, int side, int wind[6], int nGhost )
{
   wind[0] = m_iStart[g];
   wind[1] = m_iEnd[g];
   wind[2] = m_jStart[g];
   wind[3] = m_jEnd[g];
   wind[4] = m_kStart[g];
   wind[5] = m_kEnd[g];
   if( side == 0 )
      wind[1] = wind[0] + (nGhost-1);
   else if( side == 1 )
      wind[0] = wind[1] - (nGhost-1);
   else if( side == 2 )
      wind[3] = wind[2] + (nGhost-1);
   else if( side == 3 )
      wind[2] = wind[3] - (nGhost-1);
   else if( side == 4 )
      wind[5] = wind[4] + (nGhost-1);
   else
      wind[4] = wind[5] - (nGhost-1);
}

//-----------------------------------------------------------------------
// Apply the physical boundary conditions to a_U on every grid (bcfortsg or
// its component-major variant), then the curvilinear free-surface condition
// on the topography grid, and finally the Cartesian/curvilinear interface
// conditions (enforceCartTopo).
void EW::enforceBC( vector<Sarray> & a_U, vector<Sarray>& a_Mu, vector<Sarray>& a_Lambda,
                    float_sw4 t, vector<float_sw4**> & a_BCForcing )
{
   // om, ph, cv: twilight-forcing parameters, unused (zero) here.
   float_sw4 om=0, ph=0, cv=0;
   for(int g=0 ; g<mNumberOfGrids; g++ )
   {
      // int topo=topographyExists() && g == mNumberOfGrids-1;
      if( m_corder )
         bcfortsg_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g],
                          m_kEnd[g], m_BndryWindow[g], m_global_nx[g], m_global_ny[g],
                          m_global_nz[g], a_U[g].c_ptr(), mGridSize[g], m_bcType[g],
                          m_sbop, a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(), t,
                          a_BCForcing[g][0], a_BCForcing[g][1], a_BCForcing[g][2],
                          a_BCForcing[g][3], a_BCForcing[g][4], a_BCForcing[g][5],
                          om, ph, cv, m_sg_str_x[g], m_sg_str_y[g] );
      else
         bcfortsg( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g],
                   m_kEnd[g], m_BndryWindow[g], m_global_nx[g], m_global_ny[g],
                   m_global_nz[g], a_U[g].c_ptr(), mGridSize[g], m_bcType[g],
                   m_sbop, a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(), t,
                   a_BCForcing[g][0], a_BCForcing[g][1], a_BCForcing[g][2],
                   a_BCForcing[g][3], a_BCForcing[g][4], a_BCForcing[g][5],
                   om, ph, cv, m_sg_str_x[g], m_sg_str_y[g] );
      if( m_topography_exists && g == mNumberOfGrids-1 && m_bcType[g][4] == bStressFree )
      {
         // NOTE(review): bc type and forcing are read from side index 4 while
         // 'side = 5' is passed to the curvilinear routine -- presumably the
         // Fortran kernel uses 1-based side numbering; confirm.
         int side = 5;
         if( m_corder )
            freesurfcurvisg_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                                 m_kStart[g], m_kEnd[g], m_global_nz[g], side, a_U[g].c_ptr(),
a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(), mMetric.c_ptr(), m_sbop,
                                 a_BCForcing[g][4], m_sg_str_x[g], m_sg_str_y[g] );
         else
            freesurfcurvisg( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                             m_kStart[g], m_kEnd[g], m_global_nz[g], side, a_U[g].c_ptr(),
                             a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(), mMetric.c_ptr(), m_sbop,
                             a_BCForcing[g][4], m_sg_str_x[g], m_sg_str_y[g] );
      }
   }
   enforceCartTopo( a_U );
}

//-----------------------------------------------------------------------
// Copy solution values across the interface between the curvilinear grid
// (gc, on top) and the uppermost Cartesian grid (g): ghost points of each
// grid are filled from interior points of the other.
void EW::enforceCartTopo( vector<Sarray>& a_U )
{
   // interface between curvilinear and top Cartesian grid
   if (m_topography_exists)
   {
      int nc = 3;
      int g = mNumberOfCartesianGrids-1;
      int gc = mNumberOfGrids-1;
      int q, i, j;
      // inject solution values between lower boundary of gc and upper boundary of g
      for( j = m_jStart[g] ; j <= m_jEnd[g]; j++ )
         for( i = m_iStart[g]; i <= m_iEnd[g]; i++ )
         {
            // assign ghost points in the Cartesian grid
            for (q = 0; q < m_ghost_points; q++) // only once when m_ghost_points==1
            {
               for( int c = 1; c <= nc ; c++ )
                  a_U[g](c,i,j,m_kStart[g] + q) = a_U[gc](c,i,j,m_kEnd[gc]-2*m_ghost_points + q);
            }
            // assign ghost points in the Curvilinear grid
            for (q = 0; q <= m_ghost_points; q++) // twice when m_ghost_points==1 (overwrites solution on the common grid line)
            {
               for( int c = 1; c <= nc ; c++ )
                  a_U[gc](c,i,j,m_kEnd[gc]-q) = a_U[g](c,i,j,m_kStart[g]+2*m_ghost_points - q);
            }
         }
   }
}

//-----------------------------------------------------------------------
// Add the super-grid (absorbing layer) damping terms to a_Up on all grids.
// Dispatches on damping order (4 or 6) and storage order (m_corder) to the
// Cartesian kernels addsgd{4,6}fort[_indrev]; the curvilinear grid uses the
// addsgd{4,6}cfort[_indrev] variants, which take the Jacobian mJ and no
// z-direction damping arrays.
void EW::addSuperGridDamping(vector<Sarray> & a_Up, vector<Sarray> & a_U,
                             vector<Sarray> & a_Um, vector<Sarray> & a_Rho )
{
   for(int g=0 ; g<mNumberOfCartesianGrids; g++ )
   {
      if( m_sg_damping_order == 4 )
      {
         if( m_corder )
            addsgd4fort_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                                m_kStart[g], m_kEnd[g],
                                a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
                                m_sg_dc_x[g], m_sg_dc_y[g], m_sg_dc_z[g],
                                m_sg_str_x[g], m_sg_str_y[g], m_sg_str_z[g],
                                m_sg_corner_x[g], m_sg_corner_y[g], m_sg_corner_z[g],
                                m_supergrid_damping_coefficient );
         else
            addsgd4fort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                         m_kStart[g], m_kEnd[g],
                         a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
                         m_sg_dc_x[g], m_sg_dc_y[g], m_sg_dc_z[g],
                         m_sg_str_x[g], m_sg_str_y[g], m_sg_str_z[g],
                         m_sg_corner_x[g], m_sg_corner_y[g], m_sg_corner_z[g],
                         m_supergrid_damping_coefficient );
      }
      else if( m_sg_damping_order == 6 )
      {
         if( m_corder )
            addsgd6fort_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                                m_kStart[g], m_kEnd[g],
                                a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
                                m_sg_dc_x[g], m_sg_dc_y[g], m_sg_dc_z[g],
                                m_sg_str_x[g], m_sg_str_y[g], m_sg_str_z[g],
                                m_sg_corner_x[g], m_sg_corner_y[g], m_sg_corner_z[g],
                                m_supergrid_damping_coefficient );
         else
            addsgd6fort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                         m_kStart[g], m_kEnd[g],
                         a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
                         m_sg_dc_x[g], m_sg_dc_y[g], m_sg_dc_z[g],
                         m_sg_str_x[g], m_sg_str_y[g], m_sg_str_z[g],
                         m_sg_corner_x[g], m_sg_corner_y[g], m_sg_corner_z[g],
                         m_supergrid_damping_coefficient );
      }
   }
   if( m_topography_exists )
   {
      int g=mNumberOfGrids-1;
      if( m_sg_damping_order == 4 )
      {
         if( m_corder )
            addsgd4cfort_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                                 m_kStart[g], m_kEnd[g],
                                 a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
                                 m_sg_dc_x[g], m_sg_dc_y[g], m_sg_str_x[g], m_sg_str_y[g],
                                 mJ.c_ptr(), m_sg_corner_x[g], m_sg_corner_y[g],
                                 m_supergrid_damping_coefficient );
         else
            addsgd4cfort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                          m_kStart[g], m_kEnd[g],
                          a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
                          m_sg_dc_x[g], m_sg_dc_y[g], m_sg_str_x[g], m_sg_str_y[g],
                          mJ.c_ptr(), m_sg_corner_x[g], m_sg_corner_y[g],
                          m_supergrid_damping_coefficient );
      }
      else if( m_sg_damping_order == 6 )
      {
         if( m_corder )
            addsgd6cfort_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                                 m_kStart[g], m_kEnd[g],
                                 a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
                                 m_sg_dc_x[g], m_sg_dc_y[g], m_sg_str_x[g], m_sg_str_y[g],
                                 mJ.c_ptr(), m_sg_corner_x[g], m_sg_corner_y[g],
                                 m_supergrid_damping_coefficient );
         else
            addsgd6cfort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
                          m_kStart[g], m_kEnd[g],
                          a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
                          m_sg_dc_x[g], m_sg_dc_y[g], m_sg_str_x[g], m_sg_str_y[g],
                          mJ.c_ptr(), m_sg_corner_x[g], m_sg_corner_y[g],
                          m_supergrid_damping_coefficient );
      }
   }
}

//-----------------------------------------------------------------------
// Print time-stepping progress on rank 0, subject to mQuiet/mPrintInterval;
// 'force' prints unconditionally (used on the last step).
void EW::printTime( int cycle, float_sw4 t, bool force ) const
{
   if (!mQuiet && m_myrank == 0 &&
       (force || mPrintInterval == 1 || (cycle % mPrintInterval) == 1 || cycle == 1) )
      // string big enough for >1 million time steps
      cout << "Time step " << cycle << " t= " << t << endl;
}

//-----------------------------------------------------------------------
// Store the exact point-source solution at time a_t in a_U (all grids).
// Only implemented for the point-source test; returns true if a_U was filled.
bool EW::exactSol( float_sw4 a_t, vector<Sarray> & a_U, vector<Source*>& sources )
{
   bool retval=false;
   if( m_point_source_test )
   {
      for( int g=0 ; g < mNumberOfGrids; g++ )
      {
         size_t npts = static_cast<size_t>(m_iEnd[g]-m_iStart[g]+1)*
            (m_jEnd[g]-m_jStart[g]+1)*(m_kEnd[g]-m_kStart[g]+1);
         // Evaluate into a scratch buffer, then copy into the Sarray.
         float_sw4* utmp = new float_sw4[npts*3];
         // get_exact_point_source( a_U[g].c_ptr(), a_t, g, *sources[0] );
         get_exact_point_source( utmp, a_t, g, *sources[0] );
         a_U[g].assign( utmp, 0 );
         delete[] utmp;
      }
      retval = true;
   }
   return retval;
}

//-----------------------------------------------------------------------
// smooth wave for time dependence to test point force term with
float_sw4 EW::SmoothWave(float_sw4 t, float_sw4 R, float_sw4 c)
{
   float_sw4 temp = R;
   // Polynomial coefficients of the C^2 "smooth wave" pulse, supported on
   // 0 < t - R/c < 1.
   float_sw4 c0 = 2187./8., c1 = -10935./8., c2 = 19683./8., c3 = -15309./8., c4 = 2187./4.;
   if( (t-R/c) > 0 && (t-R/c) < 1 )
      temp = (c0*pow(t-R/c,3)+c1*pow(t-R/c,4)+c2*pow(t-R/c,5)+c3*pow(t-R/c,6)+c4*pow(t-R/c,7));
   else
      temp = 0;
   return temp;
}

//-----------------------------------------------------------------------
// very smooth bump for time dependence for further testing of point
// force
float_sw4 EW::VerySmoothBump(float_sw4 t, float_sw4 R, float_sw4 c)
{
   float_sw4 temp = R;
   // Polynomial bump, supported on 0 < t - R/c < 1.
   float_sw4 c0 = 1024, c1 = -5120, c2 = 10240, c3 = -10240, c4 = 5120, c5 = -1024;
   if( (t-R/c) > 0 && (t-R/c) < 1 )
      temp = (c0*pow(t-R/c,5)+c1*pow(t-R/c,6)+c2*pow(t-R/c,7)+c3*pow(t-R/c,8)+c4*pow(t-R/c,9)+c5*pow(t-R/c,10));
   else
      temp = 0;
   return temp;
}

//-----------------------------------------------------------------------
// C6 smooth bump for time dependence for further testing of point force
float_sw4 EW::C6SmoothBump(float_sw4 t, float_sw4 R, float_sw4 c)
{
   float_sw4 retval = 0;
   // 51480 normalizes the bump x^7*(1-x)^7 on 0 < x < 1.
   if( (t-R/c) > 0 && (t-R/c) < 1 )
      retval = 51480.0*pow( (t-R/c)*(1-t+R/c), 7 );
   return retval;
}

//-----------------------------------------------------------------------
// derivative of smooth wave (w.r.t. t)
float_sw4 EW::d_SmoothWave_dt(float_sw4 t, float_sw4 R, float_sw4 c)
{
   float_sw4 temp = R;
   float_sw4 c0 = 2187./8., c1 = -10935./8., c2 = 19683./8., c3 = -15309./8., c4 = 2187./4.;
   if( (t-R/c) > 0 && (t-R/c) < 1 )
      temp = (3*c0*pow(t-R/c,2)+4*c1*pow(t-R/c,3)+5*c2*pow(t-R/c,4)+6*c3*pow(t-R/c,5)+7*c4*pow(t-R/c,6));
   else
      temp = 0;
   return temp;
}

//-----------------------------------------------------------------------
// time derivative of the very smooth bump, for further testing of point force
float_sw4 EW::d_VerySmoothBump_dt(float_sw4 t, float_sw4 R, float_sw4 c)
{
   float_sw4 temp = R;
   float_sw4 c0 = 1024, c1 = -5120, c2 = 10240, c3 = -10240, c4 = 5120, c5 = -1024;
   // temp = where ( (t-R/c) > 0 && (t-R/c) < 1, (5*c0*pow(t-R/c,4)+6*c1*pow(t-R/c,5)+7*c2*pow(t-R/c,6)+8*c3*pow(t-R/c,7)+9*c4*pow(t-R/c,8))+10*c5*pow(t-R/c,9), 0);
   if( (t-R/c) > 0 && (t-R/c) < 1 )
      temp = (5*c0*pow(t-R/c,4)+6*c1*pow(t-R/c,5)+7*c2*pow(t-R/c,6)+8*c3*pow(t-R/c,7)+9*c4*pow(t-R/c,8))+10*c5*pow(t-R/c,9);
   else
      temp = 0;
   return temp;
}

//-----------------------------------------------------------------------
// time derivative of the C6 smooth bump, for further testing of point force
float_sw4 EW::d_C6SmoothBump_dt(float_sw4 t, float_sw4 R,
                                float_sw4 c)
{
   float_sw4 retval=0;
   if( (t-R/c) > 0 && (t-R/c) < 1 )
      retval = 51480.0*7*(1-2*(t-R/c))*pow((t-R/c)*(1-t+R/c),6);
   return retval;
}

//-----------------------------------------------------------------------
// Primitive function (for T) of SmoothWave(t-T)*T
float_sw4 EW::SWTP(float_sw4 Lim, float_sw4 t)
{
   float_sw4 temp = Lim;
   float_sw4 c0 = 2187./8., c1 = -10935./8., c2 = 19683./8., c3 = -15309./8., c4 = 2187./4.;
   temp = (pow(t,3)*(c0 + c1*t + c2*pow(t,2) + c3*pow(t,3) + c4*pow(t,4))*pow(Lim,2))/2. -
      (pow(t,2)*(3*c0 + 4*c1*t + 5*c2*pow(t,2) + 6*c3*pow(t,3) + 7*c4*pow(t,4))*pow(Lim,3))/3. +
      (t*(3*c0 + 6*c1*t + 10*c2*pow(t,2) + 15*c3*pow(t,3) + 21*c4*pow(t,4))*pow(Lim,4))/4. +
      ((-c0 - 4*c1*t - 10*c2*pow(t,2) - 20*c3*pow(t,3) - 35*c4*pow(t,4))*pow(Lim,5))/5. +
      ((c1 + 5*c2*t + 15*c3*pow(t,2) + 35*c4*pow(t,3))*pow(Lim,6))/6. +
      ((-c2 - 6*c3*t - 21*c4*pow(t,2))*pow(Lim,7))/7. +
      ((c3 + 7*c4*t)*pow(Lim,8))/8. -
      (c4*pow(Lim,9))/9.;
   return temp;
}

//-----------------------------------------------------------------------
// Primitive function (for T) of VerySmoothBump(t-T)*T
float_sw4 EW::VSBTP(float_sw4 Lim, float_sw4 t)
{
   float_sw4 temp = Lim;
   // f..k are the bump's polynomial coefficients (same values as c0..c5 in
   // VerySmoothBump); the closed-form antiderivative below was generated
   // symbolically.
   float_sw4 f = 1024., g = -5120., h = 10240., i = -10240., j = 5120., k = -1024.;
   temp = (pow(Lim,11)*(-25200*k*t-2520*j)+2310*k*pow(Lim,12)+(124740*k*pow(t,2)
   +24948*j*t+2772*i)*pow(Lim,10)+(-369600*k*pow(t,3)-110880*j*pow(t,2)-24640*i*t-3080*h)*pow(Lim,9)+(727650*k*pow(t,4)+291060*j*pow(t,3)+97020*i*pow(t,2)+24255*h*t+3465*g)*pow(Lim,8)+(-997920*k*pow(t,5)-498960*j*pow(t,4)-221760*i*pow(t,3)-83160*h*pow(t,2)-23760*g*t-3960*f)*pow(Lim,7)+(970200*k*pow(t,6)+582120*j*pow(t,5)+323400*i*pow(t,4)+161700*h*pow(t,3)+69300*g*pow(t,2)+23100*f*t)*pow(Lim,6)+(-665280*k*pow(t,7)-465696*j*pow(t,6)-310464*i*pow(t,5)-194040*h*pow(t,4)-110880*g*pow(t,3)-55440*f*pow(t,2))*pow(Lim,5)+
(311850*k*pow(t,8)+249480*j*pow(t,7)+194040*i*pow(t,6)+145530*h*pow(t,5)+103950*g*pow(t,4)+69300*f*pow(t,3))*pow(Lim,4)+(-92400*
k*pow(t,9)-83160*j*pow(t,8)-73920*i*pow(t,7)-64680*h*pow(t,6)-55440*g*pow(t,5)-46200*f*pow(t,4))*pow(Lim,3)+(13860*k*pow(t,10)+13860*j*pow(t,9)+13860*i*pow(t,8)+13860*h*pow(t,7)+13860*g*pow(t,6)+13860*f*pow(t,5))*pow(Lim,2))/27720.0;
   return temp;
}

//-----------------------------------------------------------------------
// Primitive function (for T) of C6SmoothBump(t-T)*T
float_sw4 EW::C6SBTP(float_sw4 Lim, float_sw4 t)
{
   float_sw4 x = t-Lim;
   return pow(x,8)*(-3217.5*pow(x,8)+3432.0*(7+t)*pow(x,7)-25740.0*(3+t)*pow(x,6)
                    +27720.0*(5+3*t)*pow(x,5)-150150.0*(t+1)*x*x*x*x +
                    32760.0*(3+5*t)*x*x*x-36036.0*(1+3*t)*x*x+5720.0*(1+7*t)*x-6435.0*t);
}

//-----------------------------------------------------------------------
// Integral of H(t-T)*H(1-t+T)*SmoothWave(t-T)*T from R/alpha to R/beta,
// i.e. the antiderivative SWTP evaluated between the integration limits
// clipped to the pulse's support window (t-1, t).
float_sw4 EW::SmoothWave_x_T_Integral(float_sw4 t, float_sw4 R, float_sw4 alpha, float_sw4 beta)
{
   float_sw4 temp = R;
   float_sw4 lowL, hiL;
   // lowL = where(R / alpha > t - 1, R/alpha, t - 1); hiL = where(R / beta < t, R / beta, t);
   if( (R / alpha > t - 1 ) )
      lowL = R/alpha;
   else
      lowL = t-1;
   if( R / beta < t )
      hiL = R/beta;
   else
      hiL = t;
   // temp = where (lowL < t && hiL > t - 1, SWTP(hiL, t) - SWTP(lowL, t), 0.0);
   if( lowL < t && hiL > t - 1 )
      temp = SWTP(hiL, t) - SWTP(lowL, t);
   else
      temp = 0;
   return temp;
}

//-----------------------------------------------------------------------
// Integral of H(t-T)*H(1-t+T)*VerySmoothBump(t-T)*T from R/alpha to R/beta,
// via the antiderivative VSBTP, with the limits clipped to (t-1, t).
float_sw4 EW::VerySmoothBump_x_T_Integral(float_sw4 t, float_sw4 R, float_sw4 alpha, float_sw4 beta)
{
   float_sw4 temp = R;
   float_sw4 lowL, hiL;
   // lowL = where(R / alpha > t - 1, R/alpha, t - 1); hiL = where(R / beta < t, R / beta, t);
   if( R / alpha > t - 1 )
      lowL = R/alpha;
   else
      lowL = t-1;
   if( R / beta < t )
      hiL = R/beta;
   else
      hiL = t;
   // temp = where (lowL < t && hiL > t - 1, VSBTP(hiL, t) - VSBTP(lowL, t), 0.0);
   if( lowL < t && hiL > t - 1 )
      temp = VSBTP(hiL, t) - VSBTP(lowL, t);
   else
      temp = 0;
   return temp;
}
//-----------------------------------------------------------------------
// Integral of H(t-T)*H(1-t+T)*C6SmoothBump(t-T)*T from R/alpha to R/beta
//
// Same clipping scheme as the SmoothWave/VerySmoothBump integrals, using the
// C6SmoothBump primitive C6SBTP.
float_sw4 EW::C6SmoothBump_x_T_Integral(float_sw4 t, float_sw4 R, float_sw4 alpha, float_sw4 beta)
{
   float_sw4 temp = R;
   float_sw4 lowL, hiL;
   // lowL = where(R / alpha > t - 1, R/alpha, t - 1); hiL = where(R / beta < t, R / beta, t);
   if( R / alpha > t - 1 )
      lowL = R/alpha;
   else
      lowL = t-1;
   if( R / beta < t )
      hiL = R/beta;
   else
      hiL = t;
   // temp = where (lowL < t && hiL > t - 1, C6SBTP(hiL, t) - C6SBTP(lowL, t), 0.0);
   if( lowL < t && hiL > t - 1 )
      temp = C6SBTP(hiL, t) - C6SBTP(lowL, t);
   else
      temp = 0;
   return temp;
}

//-----------------------------------------------------------------------
// Gaussian time function exp(-(t-R/c)^2/(2 f^2)) / (f sqrt(2 pi)),
// i.e. a normal density centered at the arrival time R/c with spread f.
float_sw4 EW::Gaussian(float_sw4 t, float_sw4 R, float_sw4 c, float_sw4 f )
{
   float_sw4 temp = R;
   temp = 1 /(f* sqrt(2*M_PI))*exp(-pow(t-R/c,2) / (2*f*f));
   return temp;
}

//-----------------------------------------------------------------------
// Time derivative of the Gaussian above.
float_sw4 EW::d_Gaussian_dt(float_sw4 t, float_sw4 R, float_sw4 c, float_sw4 f)
{
   float_sw4 temp = R;
   temp = 1 /(f* sqrt(2*M_PI))*(-exp(-pow(t-R/c,2)/(2*f*f))*(t-R/c))/pow(f,2);
   return temp;
}

//-----------------------------------------------------------------------
// Integral of Gaussian(t-T)*T from R/alpha to R/beta, in closed form via erf.
float_sw4 EW::Gaussian_x_T_Integral(float_sw4 t, float_sw4 R, float_sw4 f, float_sw4 alpha, float_sw4 beta)
{
   float_sw4 temp = R;
   temp = -0.5*t*(erf( (t-R/beta)/(sqrt(2.0)*f)) - erf( (t-R/alpha)/(sqrt(2.0)*f)) )
      - f/sqrt(2*M_PI)*( exp(-pow(t-R/beta,2)/(2*f*f) ) - exp( -pow(t-R/alpha,2)/(2*f*f) ) ) ;
   return temp;
}

//-----------------------------------------------------------------------
// Evaluate the exact (analytic) solution for a point source in a homogeneous
// full space, on grid g, at time t, into the flat array `up` (3 components
// per grid point, indexed by a running counter `ind`).
//
// up     - output; laid out as up[3*ind], up[3*ind+1], up[3*ind+2] for the
//          x,y,z components at each visited grid point, in loop order below.
// t      - evaluation time.
// g      - grid number.
// source - point source; must have time dependency SmoothWave,
//          VerySmoothBump, C6SmoothBump, or Gaussian.
// wind   - optional index window {imin,imax,jmin,jmax,kmin,kmax}; when 0 the
//          full local index range of grid g is used.
//
// The material is assumed constant and is sampled at the middle of the
// domain. Both force sources (classical Stokes solution) and moment-tensor
// sources (derivatives of the Green's function) are handled; points closer
// than eps to the source are set to zero to avoid the 1/R singularity.
void EW::get_exact_point_source( float_sw4* up, float_sw4 t, int g, Source& source, int* wind )
{
   timeDep tD;
   if(!( source.getName() == "SmoothWave" || source.getName() == "VerySmoothBump" ||
         source.getName() == "C6SmoothBump" || source.getName()== "Gaussian") )
   {
      cout << "EW::get_exact_point_source: Error, time dependency must be SmoothWave, VerySmoothBump, C6SmoothBump, or Gaussian, not " << source.getName() << endl;
      return;
   }
   else if( source.getName() == "SmoothWave" )
      tD = iSmoothWave;
   else if( source.getName() == "VerySmoothBump" )
      tD = iVerySmoothBump;
   else if( source.getName() == "C6SmoothBump" )
      tD = iC6SmoothBump;
   else
      tD = iGaussian;
   // u.set_to_zero();
   // Assume constant material, sample it in middle of domain
   int imid = (m_iStart[g]+m_iEnd[g])/2;
   int jmid = (m_jStart[g]+m_jEnd[g])/2;
   int kmid = (m_kStart[g]+m_kEnd[g])/2;
   float_sw4 rho = mRho[g](imid,jmid,kmid);
   float_sw4 beta = sqrt( mMu[g](imid,jmid,kmid)/rho);
   float_sw4 alpha = sqrt( (2*mMu[g](imid,jmid,kmid)+mLambda[g](imid,jmid,kmid))/rho);
   float_sw4 x0 = source.getX0();
   float_sw4 y0 = source.getY0();
   float_sw4 z0 = source.getZ0();
   float_sw4 fr=source.getFrequency();
   // Shift and scale time by the source offset and frequency.
   float_sw4 time = (t-source.getOffset()) * source.getFrequency();
   if( tD == iGaussian )
   {
      // For Gaussian, `fr` is reinterpreted as the spread 1/frequency.
      fr = 1/fr;
      time = time*fr;
   }
   bool ismomentsource = source.isMomentSource();
   float_sw4 fx, fy, fz;
   float_sw4 mxx, myy, mzz, mxy, mxz, myz, m0;
   if( !ismomentsource )
   {
      source.getForces( fx, fy, fz );
   }
   else
   {
      source.getMoments( mxx, mxy, mxz, myy, myz, mzz );
      // m0 = source.getAmplitude();
      m0 = 1;
   }
   float_sw4 h = mGridSize[g];
   // Points within eps of the source are treated as singular and zeroed.
   float_sw4 eps = 1e-3*h;
   size_t ind = 0;
   int imax, imin, jmax, jmin, kmax, kmin;
   if( wind == 0 )
   {
      imin = m_iStart[g];
      imax = m_iEnd[g];
      jmin = m_jStart[g];
      jmax = m_jEnd[g];
      kmin = m_kStart[g];
      kmax = m_kEnd[g];
   }
   else
   {
      imin = wind[0];
      imax = wind[1];
      jmin = wind[2];
      jmax = wind[3];
      kmin = wind[4];
      kmax = wind[5];
   }
   // Note: Use of ind, assumes loop is over the domain over which u is defined.
   // for( int k=m_kStart[g] ; k <= m_kEnd[g] ; k++ )
   //    for( int j=m_jStart[g] ; j <= m_jEnd[g] ; j++ )
   //       for( int i=m_iStart[g] ; i <= m_iEnd[g] ; i++ )
   for( int k=kmin ; k <= kmax ; k++ )
      for( int j=jmin ; j <= jmax ; j++ )
         for( int i=imin ; i <= imax ; i++ )
         {
            float_sw4 x,y,z;
            x = (i-1)*h;
            y = (j-1)*h;
            z = (k-1)*h + m_zmin[g];
            if( !ismomentsource )
            {
               // Force source: Stokes solution u = (r r^T f) A + f B, where A
               // and B combine near- and far-field terms of the selected time
               // function.
               float_sw4 R = sqrt( (x - x0)*(x - x0) + (y - y0)*(y - y0) + (z - z0)*(z - z0) );
               if( R < eps )
                  up[3*ind] = up[3*ind+1] = up[3*ind+2] = 0;
               else
               {
                  float_sw4 A, B;
                  if (tD == iSmoothWave)
                  {
                     A = ( 1/pow(alpha,2) * SmoothWave(time, fr*R, alpha) -
                           1/pow(beta,2) * SmoothWave(time, fr*R, beta) +
                           3/pow(fr*R,2) * SmoothWave_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R*R*R) ;
                     B = ( 1/pow(beta,2) * SmoothWave(time, fr*R, beta) -
                           1/pow(fr*R,2) * SmoothWave_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R) ;
                  }
                  else if (tD == iVerySmoothBump)
                  {
                     A = ( 1/pow(alpha,2) * VerySmoothBump(time, fr*R, alpha) -
                           1/pow(beta,2) * VerySmoothBump(time, fr*R, beta) +
                           3/pow(fr*R,2) * VerySmoothBump_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R*R*R) ;
                     B = ( 1/pow(beta,2) * VerySmoothBump(time, fr*R, beta) -
                           1/pow(fr*R,2) * VerySmoothBump_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R) ;
                  }
                  else if (tD == iC6SmoothBump)
                  {
                     A = ( 1/pow(alpha,2) * C6SmoothBump(time, fr*R, alpha) -
                           1/pow(beta,2) * C6SmoothBump(time, fr*R, beta) +
                           3/pow(fr*R,2) * C6SmoothBump_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R*R*R) ;
                     B = ( 1/pow(beta,2) * C6SmoothBump(time, fr*R, beta) -
                           1/pow(fr*R,2) * C6SmoothBump_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R) ;
                  }
                  else if( tD == iGaussian )
                  {
                     // Gaussian uses unscaled R (fr carries the spread here).
                     A = ( 1/pow(alpha,2) * Gaussian(time, R, alpha,fr) -
                           1/pow(beta,2) * Gaussian(time, R, beta,fr) +
                           3/pow(R,2) * Gaussian_x_T_Integral(time, R, fr, alpha, beta) ) / (4*M_PI*rho*R*R*R) ;
                     B = ( 1/pow(beta,2) * Gaussian(time, R, beta,fr) -
                           1/pow(R,2) * Gaussian_x_T_Integral(time, R, fr, alpha, beta) ) / (4*M_PI*rho*R) ;
                  }
                  up[3*ind] = ( (x - x0)*(x - x0)*fx + (x - x0)*(y - y0)*fy + (x - x0)*(z - z0)*fz )*A + fx*B;
                  up[3*ind+1] = ( (y - y0)*(x - x0)*fx + (y - y0)*(y - y0)*fy + (y - y0)*(z - z0)*fz )*A + fy*B;
                  up[3*ind+2] = ( (z - z0)*(x - x0)*fx + (z - z0)*(y - y0)*fy + (z - z0)*(z - z0)*fz )*A + fz*B;
               }
            }
            else
            {
               up[3*ind] = up[3*ind+1] = up[3*ind+2] = 0;
               // Here, ismomentsource == true
               float_sw4 R = sqrt( (x - x0)*(x - x0) + (y - y0)*(y - y0) + (z - z0)*(z - z0) );
               if( R < eps )
               {
                  up[3*ind] = up[3*ind+1] = up[3*ind+2] = 0;
               }
               else
               {
                  // Moment source: accumulate the nine moment-tensor
                  // components times spatial derivatives of the Green's
                  // function G. Per time dependency:
                  //   A,B - P- and S-wave time functions at (time,R)
                  //   C   - near-field integral term
                  //   D,E - time derivatives scaled by 1/(c^3 R)
                  float_sw4 A, B, C, D, E;
                  if (tD == iSmoothWave)
                  {
                     A = SmoothWave(time, R, alpha);
                     B = SmoothWave(time, R, beta);
                     C = SmoothWave_x_T_Integral(time, R, alpha, beta);
                     D = d_SmoothWave_dt(time, R, alpha) / pow(alpha,3) / R;
                     E = d_SmoothWave_dt(time, R, beta) / pow(beta,3) / R;
                  }
                  else if (tD == iVerySmoothBump)
                  {
                     A = VerySmoothBump(time, R, alpha);
                     B = VerySmoothBump(time, R, beta);
                     C = VerySmoothBump_x_T_Integral(time, R, alpha, beta);
                     D = d_VerySmoothBump_dt(time, R, alpha) / pow(alpha,3) / R;
                     E = d_VerySmoothBump_dt(time, R, beta) / pow(beta,3) / R;
                  }
                  else if (tD == iC6SmoothBump)
                  {
                     A = C6SmoothBump(time, R, alpha);
                     B = C6SmoothBump(time, R, beta);
                     C = C6SmoothBump_x_T_Integral(time, R, alpha, beta);
                     D = d_C6SmoothBump_dt(time, R, alpha) / pow(alpha,3) / R;
                     E = d_C6SmoothBump_dt(time, R, beta) / pow(beta,3) / R;
                  }
                  else if (tD == iGaussian)
                  {
                     A = Gaussian(time, R, alpha,fr);
                     B = Gaussian(time, R, beta,fr);
                     C = Gaussian_x_T_Integral(time, R, fr,alpha, beta);
                     D = d_Gaussian_dt(time, R, alpha,fr) / pow(alpha,3) / R;
                     E = d_Gaussian_dt(time, R, beta,fr) / pow(beta,3) / R;
                  }
                  up[3*ind] += // m_xx*G_xx,x
                     + m0*mxx/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(x-x0)*(x-x0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - 2*(x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(x-x0)*(x-x0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(x-x0)*(x-x0) / pow(R,7) - 6*(x-x0) / pow(R,5) ) * C
                       + (x-x0)*(x-x0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       - 1 / pow(R,3) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       - 3*(x-x0) / pow(R,5) * C
                       + (x-x0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (x-x0)*E );
                  up[3*ind] += // m_yy*G_xy,y
                     + m0*myy/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(y-y0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       + 3*(x-x0)*(y-y0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(y-y0)*(y-y0) / pow(R,7) - 3*(x-x0) / pow(R,5) ) * C );
                  up[3*ind] += // m_zz*G_xz,z
                     + m0*mzz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(z-z0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       + 3*(x-x0)*(z-z0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(z-z0)*(z-z0) / pow(R,7) - 3*(x-x0) / pow(R,5) ) * C );
                  up[3*ind] += // m_xy*G_xy,x
                     + m0*mxy/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(x-x0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(y-y0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       + 3*(x-x0)*(y-y0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(x-x0)*(y-y0) / pow(R,7) - 3*(y-y0) / pow(R,5) ) * C );
                  up[3*ind] += // m_xy*G_xx,y
                     + m0*mxy/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(x-x0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(x-x0)*(x-x0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + 15*(x-x0)*(x-x0)*(y-y0) / pow(R,7) * C
                       + (x-x0)*(x-x0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       - 1 / pow(R,3) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       - 3*(y-y0) / pow(R,5) * C
                       + (y-y0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (y-y0)*E );
                  up[3*ind] += // m_xz*G_xz,x
                     + m0*mxz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(x-x0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       + 3*(x-x0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(x-x0)*(z-z0) / pow(R,7) - 3*(z-z0) / pow(R,5) ) * C );
                  up[3*ind] += // m_yz*G_xz,y
                     + m0*myz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(z-z0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       + 3*(x-x0)*(z-z0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C );
                  up[3*ind] += // m_xz*G_xx,z
                     + m0*mxz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(x-x0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(x-x0)*(x-x0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + 15*(x-x0)*(x-x0)*(z-z0) / pow(R,7) * C
                       + (x-x0)*(x-x0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       - 1 / pow(R,3) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       - 3*(z-z0) / pow(R,5) * C
                       + (z-z0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (z-z0)*E );
                  up[3*ind] += // m_yz*G_yx,z
                     + m0*myz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       + 3*(x-x0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C );
                  //------------------------------------------------------------
                  up[3*ind+1] += // m_xx*G_xy,x
                     m0*mxx/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(x-x0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(y-y0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       + 3*(x-x0)*(y-y0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(x-x0)*(y-y0) / pow(R,7) - 3*(y-y0) / pow(R,5) ) * C );
                  up[3*ind+1] += // m_yy**G_yy,y
                     + m0*myy/(4*M_PI*rho)*
                     ( + 3*(y-y0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - 2*(y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(y-y0)*(y-y0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + ( 15*(y-y0)*(y-y0)*(y-y0) / pow(R,7) - 6*(y-y0) / pow(R,5) ) * C
                       + (y-y0)*(y-y0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       - 1 / pow(R,3) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       - 3*(y-y0) / pow(R,5) * C
                       + (y-y0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (y-y0)*E );
                  up[3*ind+1] += // m_zz*G_zy,z
                     + m0*mzz/(4*M_PI*rho)*
                     ( + 3*(z-z0)*(z-z0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (z-z0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       + 3*(z-z0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + ( 15*(z-z0)*(z-z0)*(y-y0) / pow(R,7) - 3*(y-y0) / pow(R,5) ) * C );
                  up[3*ind+1] += // m_xy*G_yy,x
                     + m0*mxy/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(y-y0)*(y-y0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + 15*(x-x0)*(y-y0)*(y-y0) / pow(R,7) * C
                       + (y-y0)*(y-y0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       - 1 / pow(R,3) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       - 3*(x-x0) / pow(R,5) * C
                       + (x-x0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (x-x0)*E );
                  up[3*ind+1] += // m_xz*G_zy,x
                     + m0*mxz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (y-y0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       + 3*(y-y0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C );
                  up[3*ind+1] += // m_xy*G_xy,y
                     + m0*mxy/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(y-y0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       + 3*(x-x0)*(y-y0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(y-y0)*(y-y0) / pow(R,7) - 3*(x-x0) / pow(R,5) ) * C );
                  up[3*ind+1] += // m_yz*G_zy,y
                     + m0*myz/(4*M_PI*rho)*
                     ( + 3*(z-z0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (z-z0)*(y-y0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       + 3*(z-z0)*(y-y0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + ( 15*(z-z0)*(y-y0)*(y-y0) / pow(R,7) - 3*(z-z0) / pow(R,5) ) * C );
                  up[3*ind+1] += // m_xz*G_xy,z
                     + m0*mxz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       + 3*(x-x0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C );
                  up[3*ind+1] += // m_yz*G_yy,z
                     + m0*myz/(4*M_PI*rho)*
                     ( + 3*(z-z0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(y-y0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + 15*(z-z0)*(y-y0)*(y-y0) / pow(R,7) * C
                       + (y-y0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       - 1 / pow(R,3) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       - 3*(z-z0) / pow(R,5) * C
                       + (z-z0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (z-z0)*E );
                  //------------------------------------------------------------
                  up[3*ind+2] += // m_xx*G_zx,x
                     + m0*mxx/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(x-x0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       + 3*(x-x0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(x-x0)*(z-z0) / pow(R,7) - 3*(z-z0) / pow(R,5) ) * C );
                  up[3*ind+2] += // m_yy*G_zy,y
                     + m0*myy/(4*M_PI*rho)*
                     ( + 3*(y-y0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (y-y0)*(z-z0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       + 3*(y-y0)*(z-z0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + ( 15*(y-y0)*(y-y0)*(z-z0) / pow(R,7) - 3*(z-z0) / pow(R,5) ) * C );
                  up[3*ind+2] += // m_zz**G_zz,z
                     + m0*mzz/(4*M_PI*rho)*
                     ( + 3*(z-z0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - 2*(z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(z-z0)*(z-z0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + ( 15*(z-z0)*(z-z0)*(z-z0) / pow(R,7) - 6*(z-z0) / pow(R,5) ) * C
                       + (z-z0)*(z-z0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       - 1 / pow(R,3) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       - 3*(z-z0) / pow(R,5) * C
                       + (z-z0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (z-z0)*E );
                  up[3*ind+2] += // m_xy*G_zy,x
                     + m0*mxy/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (y-y0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       + 3*(y-y0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C );
                  up[3*ind+2] += // m_xz**G_zz,x
                     + m0*mxz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(z-z0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       + 15*(x-x0)*(z-z0)*(z-z0) / pow(R,7) * C
                       + (z-z0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                       - 1 / pow(R,3) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                       - 3*(x-x0) / pow(R,5) * C
                       + (x-x0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (x-x0)*E );
                  up[3*ind+2] += // m_xy*G_xz,y
                     + m0*mxy/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(z-z0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       + 3*(x-x0)*(z-z0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C );
                  up[3*ind+2] += // m_yz*G_zz,y
                     + m0*myz/(4*M_PI*rho)*
                     ( + 3*(y-y0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       + 3*(z-z0)*(z-z0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       + 15*(y-y0)*(z-z0)*(z-z0) / pow(R,7) * C
                       + (z-z0)*(z-z0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                       - 1 / pow(R,3) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                       - 3*(y-y0) / pow(R,5) * C
                       + (y-y0) / (pow(R,3)*pow(beta,2)) * B
                       + 1 / R * (y-y0)*E );
                  up[3*ind+2] += // m_xz*G_xz,z
                     + m0*mxz/(4*M_PI*rho)*
                     ( + 3*(x-x0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (x-x0)*(z-z0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       + 3*(x-x0)*(z-z0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + ( 15*(x-x0)*(z-z0)*(z-z0) / pow(R,7) - 3*(x-x0) / pow(R,5) ) * C );
                  up[3*ind+2] += // m_yz*G_yz,z
                     +
                     m0*myz/(4*M_PI*rho)*
                     ( + 3*(z-z0)*(z-z0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                       - (y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                       + (z-z0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                       + 3*(z-z0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                       + ( 15*(z-z0)*(z-z0)*(y-y0) / pow(R,7) - 3*(y-y0) / pow(R,5) ) * C );
               }
            }
            ind++;
         }
}

//-----------------------------------------------------------------------
// Compute global error norms of the difference a_Uex - a_U over all grids.
//
// a_Uex/a_U       - exact and computed solutions, one Sarray per grid.
// diffInf/diffL2  - output: global max-norm and L2-norm of the difference.
// xInf            - output: global max-norm of the exact solution.
// a_globalSources - used only in the point-source test to exclude a small
//                   ball of radius 4h around the source from the norms.
//
// Supergrid damping layers are excluded from the norm computation; the
// per-grid work is done in the Fortran kernel solerr3fort, and the local
// results are combined across ranks with MPI_Allreduce (collective: all
// ranks must call this routine).
void EW::normOfDifference( vector<Sarray> & a_Uex, vector<Sarray> & a_U, float_sw4 &diffInf,
                           float_sw4 &diffL2, float_sw4 &xInf, vector<Source*>& a_globalSources )
{
   float_sw4 linfLocal=0, l2Local=0, diffInfLocal=0, diffL2Local=0;
   float_sw4 xInfLocal=0, xInfGrid=0;
   float_sw4 htop = mGridSize[mNumberOfGrids-1];
   float_sw4 hbot = mGridSize[0];
   for(int g=0 ; g<mNumberOfGrids; g++ )
   {
      float_sw4 radius =-1, x0=0, y0=0, z0=0;
      float_sw4 h = mGridSize[g];
      // Supergrid layer thickness in grid points of this grid (horizontal
      // layers sized by the finest spacing, bottom layer by the coarsest).
      int nsgxy = (int)(0.5+m_sg_gp_thickness*htop/h);
      int nsgz = (int)(0.5+m_sg_gp_thickness*hbot/h);
      int imin, imax, jmin, jmax, kmin, kmax;
      // Remove supergrid layers
      if (mbcGlobalType[0] == bSuperGrid)
         imin = max(m_iStartInt[g], nsgxy+1);
      else
         imin = m_iStartInt[g];
      if (mbcGlobalType[1] == bSuperGrid)
         imax = min(m_iEndInt[g], m_global_nx[g] - nsgxy);
      else
         imax = m_iEndInt[g];
      if (mbcGlobalType[2] == bSuperGrid)
         jmin = max(m_jStartInt[g], nsgxy+1);
      else
         jmin = m_jStartInt[g];
      if (mbcGlobalType[3] == bSuperGrid)
         jmax = min(m_jEndInt[g], m_global_ny[g] - nsgxy);
      else
         jmax = m_jEndInt[g];
      // Can not test on global type when there is more than one grid in the z-direction
      // if uppermost grid has layer on top boundary, the fine grid spacing is used for the s.g. layer width
      if (m_bcType[g][4] == bSuperGrid)
         kmin = max(m_kStartInt[g], nsgxy+1);
      else
         kmin = m_kStartInt[g];
      // The lowermost grid has the s.g. layer width based on the spacing of the coarsest grid
      if (m_bcType[g][5] == bSuperGrid)
         kmax = min(m_kEndInt[g], m_global_nz[g] - nsgz);
      else
         kmax = m_kEndInt[g];
      if( m_point_source_test )
      {
         // Exclude a ball of radius 4h around the (single) point source.
         radius = 4*h;
         x0 = a_globalSources[0]->getX0();
         y0 = a_globalSources[0]->getY0();
         z0 = a_globalSources[0]->getZ0();
      }
      solerr3fort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
                   h, a_Uex[g].c_ptr(), a_U[g].c_ptr(), linfLocal, l2Local, xInfGrid,
                   m_zmin[g], x0, y0, z0, radius, imin, imax, jmin, jmax, kmin, kmax );
      if (linfLocal > diffInfLocal)
         diffInfLocal = linfLocal;
      if (xInfGrid > xInfLocal)
         xInfLocal = xInfGrid;
      diffL2Local += l2Local;
   }
   // communicate local results for global errors
   MPI_Allreduce( &diffInfLocal, &diffInf, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator );
   MPI_Allreduce( &xInfLocal, &xInf, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator );
   MPI_Allreduce( &diffL2Local, &diffL2, 1, m_mpifloat, MPI_SUM, m_cartesian_communicator );
   diffL2 = sqrt(diffL2);
}

//-----------------------------------------------------------------------
// Sanity-check per-grid dimensions: one-sided difference operators at the
// top/bottom boundaries require a minimum number of interior points in z.
void EW::check_dimensions()
{
   for( int g= 0 ; g < mNumberOfGrids ; g++ )
   {
      int nz=m_kEndInt[g]-m_kStartInt[g]+1;
      int nzmin;
      // 12 points needed with one-sided operators on both ends, 8 with one.
      if( m_onesided[g][4] && m_onesided[g][5] )
         nzmin = 12;
      else if( m_onesided[g][4] || m_onesided[g][5] )
         nzmin = 8;
      else
         nzmin = 1;
      REQUIRE2( nz >= nzmin, "The number of grid points (not counting ghost pts) in the z-direction in grid "
                << g << " must be >= " << nzmin << " current value is " << nz );
      int nx = m_iEndInt[g]-m_iStartInt[g]+1;
      REQUIRE2( nx >= 1, "No grid points left (not counting ghost pts) in the x-direction in grid " << g );
      int ny = m_jEndInt[g]-m_jStartInt[g]+1;
      REQUIRE2( ny >= 1, "No grid points left (not counting ghost pts) in the y-direction in grid " << g );
   }
}

//-----------------------------------------------------------------------
// Configure the supergrid (absorbing layer) tapers in x, y and z from the
// global boundary condition types. Horizontal tapers are shared by all
// grids; the z-taper is per grid: only the top grid can have a top layer
// (and not under topography), only the bottom grid a bottom layer.
void EW::setup_supergrid( )
{
   if (mVerbose >= 3 && m_myrank == 0 )
      cout << "*** Inside setup_supergrid ***" << endl;
   // check to see if there are any supergrid boundary conditions
   m_use_supergrid = false;
   for( int side=0 ; side < 6 ; side++ )
      if( mbcGlobalType[side] == bSuperGrid )
         m_use_supergrid = true;
   if (mVerbose && m_myrank == 0 && m_use_supergrid)
      cout << "Detected at least one boundary with supergrid conditions" << endl;
   int gTop = mNumberOfCartesianGrids-1;
   int gBot = 0;
   m_supergrid_taper_z.resize(mNumberOfGrids);
   m_supergrid_taper_x.define_taper( (mbcGlobalType[0] == bSuperGrid), 0.0,
                                     (mbcGlobalType[1] == bSuperGrid), m_global_xmax,
                                     m_sg_gp_thickness*mGridSize[gTop] );
   m_supergrid_taper_y.define_taper( (mbcGlobalType[2] == bSuperGrid), 0.0,
                                     (mbcGlobalType[3] == bSuperGrid), m_global_ymax,
                                     m_sg_gp_thickness*mGridSize[gTop] );
   if( mNumberOfGrids == 1 )
      m_supergrid_taper_z[0].define_taper( !m_topography_exists && (mbcGlobalType[4] == bSuperGrid), 0.0,
                                           (mbcGlobalType[5] == bSuperGrid), m_global_zmax,
                                           m_sg_gp_thickness*mGridSize[gBot] );
   else
   {
      // Top grid: possible top layer only; bottom grid: possible bottom
      // layer only; interior grids get no z-taper.
      m_supergrid_taper_z[mNumberOfGrids-1].define_taper( !m_topography_exists && (mbcGlobalType[4] == bSuperGrid), 0.0,
                                                          false, m_global_zmax,
                                                          m_sg_gp_thickness*mGridSize[gTop] );
      m_supergrid_taper_z[0].define_taper( false, 0.0, mbcGlobalType[5]==bSuperGrid, m_global_zmax,
                                           m_sg_gp_thickness*mGridSize[gBot] );
      for( int g=1 ; g < mNumberOfGrids-1 ; g++ )
         m_supergrid_taper_z[g].define_taper( false, 0.0, false, 0.0,
                                              m_sg_gp_thickness*mGridSize[gBot] );
   }
}

//-----------------------------------------------------------------------
// Fill the 1-D supergrid damping/stretching/corner-taper arrays for every
// grid. When supergrid is not used, the arrays are still defined with
// neutral values (damping 0, stretching 1) to simplify the solver kernels.
void EW::assign_supergrid_damping_arrays()
{
   int i, j, k, topCartesian;
   float_sw4 x, y, z;
// Local accessor macros: map a global grid index onto the per-grid 1-D
// arrays, which start at m_iStart/m_jStart/m_kStart.
#define dcx(i,g) (m_sg_dc_x[g])[i-m_iStart[g]]
#define dcy(j,g) (m_sg_dc_y[g])[j-m_jStart[g]]
#define dcz(k,g) (m_sg_dc_z[g])[k-m_kStart[g]]
#define strx(i,g) (m_sg_str_x[g])[i-m_iStart[g]]
#define stry(j,g) (m_sg_str_y[g])[j-m_jStart[g]]
#define strz(k,g) (m_sg_str_z[g])[k-m_kStart[g]]
#define cornerx(i,g) (m_sg_corner_x[g])[i-m_iStart[g]]
#define cornery(j,g) (m_sg_corner_y[g])[j-m_jStart[g]]
#define cornerz(k,g) (m_sg_corner_z[g])[k-m_kStart[g]]
   // topCartesian = mNumberOfCartesianGrids-1;
   // Note: compared to WPP2, we don't need to center the damping coefficients on the half-point anymore,
   // because the damping term is now 4th order: D+D-( a(x) D+D- ut(x) )
   topCartesian = mNumberOfCartesianGrids-1;
   if( m_use_supergrid )
   {
      for( int g=0 ; g<mNumberOfGrids; g++)
      {
         for( i = m_iStart[g] ; i <= m_iEnd[g] ; i++ )
         {
            x = (i-1)*mGridSize[g];
            dcx(i,g) = m_supergrid_taper_x.dampingCoeff(x);
            strx(i,g) = m_supergrid_taper_x.stretching(x);
            cornerx(i,g) = m_supergrid_taper_x.cornerTaper(x);
         }
         for( j = m_jStart[g] ; j <= m_jEnd[g] ; j++ )
         {
            y = (j-1)*mGridSize[g];
            dcy(j,g) = m_supergrid_taper_y.dampingCoeff(y);
            stry(j,g) = m_supergrid_taper_y.stretching(y);
            cornery(j,g) = m_supergrid_taper_y.cornerTaper(y);
         }
         if (g > topCartesian || (0 < g && g < mNumberOfGrids-1) ) // Curvilinear or refinement grid
         {
            // No supergrid damping in the vertical (k-) direction on a curvilinear or refinement grid.
            for( k = m_kStart[g] ; k <= m_kEnd[g] ; k++ )
            {
               dcz(k,g) = 0.;
               strz(k,g) = 1;
               cornerz(k,g) = 1.;
            }
         }
         else
         {
            for( k = m_kStart[g] ; k <= m_kEnd[g] ; k++ )
            {
               z = m_zmin[g] + (k-1)*mGridSize[g];
               dcz(k,g) = m_supergrid_taper_z[g].dampingCoeff(z);
               strz(k,g) = m_supergrid_taper_z[g].stretching(z);
               cornerz(k,g) = m_supergrid_taper_z[g].cornerTaper(z);
            }
         }
      } // end for g...
   } // end if m_use_supergrid
   else //
   {
      // Supergrid not used, but define arrays to simplify coding in some places.
      for( int g=0 ; g < mNumberOfGrids ; g++ )
      {
         for( i = m_iStart[g] ; i <= m_iEnd[g] ; i++ )
         {
            dcx(i,g) = 0;
            strx(i,g) = 1;
            cornerx(i,g) = 1.;
         }
         for( j = m_jStart[g] ; j <= m_jEnd[g] ; j++ )
         {
            dcy(j,g) = 0;
            stry(j,g) = 1;
            cornery(j,g) = 1.;
         }
         for( k = m_kStart[g] ; k <= m_kEnd[g] ; k++ )
         {
            dcz(k,g) = 0.;
            strz(k,g) = 1;
            cornerz(k,g) = 1.;
         }
      }
   }
   copy_supergrid_arrays_to_device();
#undef dcx
#undef dcy
#undef dcz
#undef strx
#undef stry
#undef strz
#undef cornerx
#undef cornerz
#undef cornery
}

//-----------------------------------------------------------------------
void EW::assign_local_bcs( )
{
   // This routine assigns m_bcType[g][b], b=0,1,2,3, based on mbcGlobalType, taking parallel overlap boundaries into account
   int top=mNumberOfGrids-1; // index of the top grid in the arrays m_iStart, m_iEnd, etc
   // horizontal bc's are the same for all grids
   for( int g= 0 ; g < mNumberOfGrids ; g++ )
   {
      // start by copying the global bc's
      for (int b=0; b<=3; b++)
         m_bcType[g][b] = mbcGlobalType[b];
      // Sides interior to the processor decomposition become bProcessor.
      if (m_iStart[top]+m_ghost_points > 1)
      {
         m_bcType[g][0] = bProcessor;
      }
      if (m_iEnd[top]-m_ghost_points < m_global_nx[top])
      {
         m_bcType[g][1] = bProcessor;
      }
      if (m_jStart[top]+m_ghost_points > 1)
      {
         m_bcType[g][2] = bProcessor;
      }
      if (m_jEnd[top]-m_ghost_points < m_global_ny[top])
      {
         m_bcType[g][3] = bProcessor;
      }
   }
   // vertical bc's are interpolating except at the bottom and the top, where they equal the global conditions
   // ( Only preliminary support for acoustic/elastic, not fully implemented)
   m_bcType[top][4] = mbcGlobalType[4];
   for( int g = 0 ; g < mNumberOfGrids-1 ; g++ )
   {
      if( m_is_curvilinear[g+1] && !m_is_curvilinear[g] ) // Elastic case only
         m_bcType[g][4] = bCCInterface;
      if( !m_is_curvilinear[g+1] && !m_is_curvilinear[g] ) // Two Cartesian grids, must be refinement bndry.
         m_bcType[g][4] = bRefInterface;
      if( !m_is_curvilinear[g+1] && m_is_curvilinear[g] ) // Acoustic case only
         m_bcType[g][4] = bCCInterface;
      if( m_is_curvilinear[g+1] && m_is_curvilinear[g] ) // Acoustic/Elastic interface
         m_bcType[g][4] = bAEInterface;
   }
   m_bcType[0][5] = mbcGlobalType[5];
   for( int g = 1 ; g < mNumberOfGrids ; g++ )
   {
      if( m_is_curvilinear[g] && !m_is_curvilinear[g-1] ) // Elastic case
         m_bcType[g][5] = bCCInterface;
      if( !m_is_curvilinear[g] && !m_is_curvilinear[g-1] ) // Two Cartesian grids, must be refinement bndry.
         m_bcType[g][5] = bRefInterface;
      if( !m_is_curvilinear[g] && m_is_curvilinear[g-1] ) // Acoustic case
         m_bcType[g][5] = bCCInterface;
      if( m_is_curvilinear[g] && m_is_curvilinear[g-1] ) // Acoustic/Elastic interface
         m_bcType[g][5] = bAEInterface;
   }
   // Find out which boundaries need one sided approximation in mixed derivatives
   for( int g= 0 ; g < mNumberOfGrids ; g++ )
      for(int side=4 ; side < 6 ; side++ )
         m_onesided[g][side] = (m_bcType[g][side] == bStressFree) ||
            (m_bcType[g][side] == bRefInterface) || (m_bcType[g][side] == bAEInterface);
}

//-----------------------------------------------------------------------
// Create the output directory mPath. Rank 0 creates it (recursively via
// mkdirs) and checks write permission; all ranks then verify that the
// directory exists and is writable. Fatal errors abort the MPI job.
void EW::create_output_directory( )
{
   if (m_myrank == 0 )
   {
      cout << "----------------------------------------------------" << endl
           << " Making Output Directory: " << mPath << endl
           << "\t\t" << endl;
      // Create directory where all these files will be written.
      int err = mkdirs(mPath);
      if (err == 0)
         cout << "... Done!" << endl
              << "----------------------------------------------------" << endl;
      else
      {
         // fatal error
         cerr << endl << "******** Failed to create the output directory *******" << endl << endl;
         MPI_Abort(MPI_COMM_WORLD,1);
      }
      // check that we have write permission on the directory
      if (access(mPath.c_str(),W_OK)!=0)
      {
         // fatal error
         cerr << endl << "Error: No write permission on output directory: " << mPath << endl;
         MPI_Abort(MPI_COMM_WORLD,1);
      }
   }
   // Let processor 0 finish first!
   cout.flush();
   cerr.flush();
   MPI_Barrier(MPI_COMM_WORLD);
   // Check that the mPath directory exists from all processes
   struct stat statBuf;
   int statErr = stat(mPath.c_str(), &statBuf);
   CHECK_INPUT(statErr == 0 && S_ISDIR(statBuf.st_mode), "Error: " << mPath << " is not a directory" << endl);
   // check that all processes have write permission on the directory
   CHECK_INPUT(access(mPath.c_str(),W_OK)==0,
               "Error: No write permission on output directory: " << mPath << endl);
}

//-----------------------------------------------------------------------
// Recursively create every missing component of `path` (like `mkdir -p`).
// Returns 0 on success, -1 on failure (diagnostics go to cerr/cout).
// NOTE(review): uses strtok, which keeps static state and is not
// thread-safe; acceptable here since it is called from rank 0 only.
int EW::mkdirs(const string& path)
{
   // string pathTemp(path.begin(), path.end());
   string pathTemp = path;
   //-----------------------------------------------------------------
   // Recursively call stat and then mkdir on each sub-directory in 'path'
   //-----------------------------------------------------------------
   string sep = "/";
   char * pathtemparg = new char[pathTemp.length()+1];
   strcpy(pathtemparg,pathTemp.c_str());
   char* token = strtok( pathtemparg, sep.c_str() );
   // char* token = strtok(const_cast<char*>(pathTemp.c_str()), sep.c_str());
   stringstream pathsofar;
   // for checking the status:
   struct stat statBuf;
   int statErr;
   // If there's a leading slash, put it back on...
   if (strncmp(pathTemp.c_str(), sep.c_str(), 1) == 0)
      pathsofar << sep;
   while (token != NULL)
   {
      pathsofar << token << sep;
      // test: check the status of the path so far...
      // cout << "Calling stat() on path: " << pathsofar.str() << endl;
      statErr = stat(pathsofar.str().c_str(), &statBuf);
      if (statErr == 0)
      {
         // cout << "stat() returned successfully." << endl;
         if ( S_ISDIR(statBuf.st_mode) )
         {
            // cout << "stat() says: '" << pathsofar.str() << "' is a directory." << endl;
            // it already exists, this is okay, let's get the next directory in the string and skip to the while statement
            token = strtok(NULL, sep.c_str());
            continue;
         }
         else
         {
            cerr << "stat() says: '" << pathsofar.str() << "' is not a directory." << endl;
            // real error, let's bail...
            delete[] pathtemparg;
            return -1;
         }
      }
      else
      {
         // cerr << "stat() returned an error code." << endl;
         if (errno == EACCES)
         {
            cerr << "Error: **Search permission is denied for one of the directories in the path prefix of " << pathsofar.str() << endl;
            delete[] pathtemparg;
            return -1;
         }
         else if (errno == ENOTDIR)
         {
            cerr << "Error: **A component of the path '" << pathsofar.str() << "' is not a directory. " << endl;
            delete[] pathtemparg;
            return -1;
         }
         else if (errno == ENOENT)
         {
            // this means that we need to call mkdir to create the directory
            if (mVerbose >=2)
               cout << "Info: **stat returned ENOENT (the path does not exist, or the path " << endl
                    << "      is an empty string) " << pathsofar.str() << endl;
         }
         else
         {
            if (mVerbose >=2)
               cout << "Info: **stat returned other error code for path: " << pathsofar.str() << endl;
         }
      }
      // if we got this far, then 'pathsofar' does not exists
      // tmp
      if (mVerbose >=2)
         cout << "Calling mkdir() on path: " << pathsofar.str() << endl;
      // old code for recursively making the output directory
      if (mkdir(pathsofar.str().c_str(),
                S_IWUSR | S_IXUSR | S_IRUSR | S_IRGRP | S_IXGRP ) // why do we need group permissions?
          == -1)
      {
         if (mVerbose >=2)
            cout << "mkdir() returned an error code." << endl;
         // check error conditions
         if (errno == EEXIST)
         {
            // can this ever happen since we called stat(), which said that the directory did not exist ???
            if (mVerbose >=2)
               cout << "Info: ** The directory already exists:" << pathsofar.str() << endl;
            // it already exists, this is okay!
            token = strtok(NULL, sep.c_str());
            continue;
         }
         else if (errno == EACCES)
            cerr << "Error: **Write permission is denied for the parent directory in which the new directory is to be added." << pathsofar.str() << endl;
         else if (errno == EMLINK)
            cerr << "Error: **The parent directory has too many links (entries)." << pathsofar.str() << endl;
         else if (errno == ENOSPC)
            cerr << "Error: **The file system doesn't have enough room to create the new directory." << pathsofar.str() << endl;
         else if (errno == EROFS)
            cerr << "Error: ** The parent directory of the directory being created is on a read-only file system and cannot be modified." << pathsofar.str() << endl;
         else if (errno == ENOSPC)
            cerr << "Error: ** The new directory cannot be created because the user's disk quota is exhausted." << pathsofar.str() << endl;
         // real error, let's bail...
         delete[] pathtemparg;
         return -1;
      }
      else
      {
         if (mVerbose >=2)
            cout << "mkdir() returned successfully." << endl;
         // are there more directories to be made?
         token = strtok(NULL, sep.c_str());
      }
   }
   delete[] pathtemparg;
   return 0;
}

//-----------------------------------------------------------------------
// Compute the stable time step from the CFL condition over all grids
// (definition continues past the end of this chunk).
void EW::computeDT()
{
   if (!mQuiet && mVerbose >= 1 && m_myrank == 0 )
      printf("*** computing the time step ***\n");
   float_sw4 dtloc=1.e10;
   for (int g=0; g<mNumberOfCartesianGrids; g++)
   {
      // Largest eigenvalue (4 mu + lambda)/rho over the grid bounds the
      // P-wave speed used in the CFL estimate.
      float_sw4 eigmax = -1;
      for (int k=m_kStart[g]; k<=m_kEnd[g]; k++)
         for (int j=m_jStart[g]; j<=m_jEnd[g]; j++)
            for (int i=m_iStart[g]; i<=m_iEnd[g]; i++)
            {
               float_sw4 loceig = (4*mMu[g](i,j,k) + mLambda[g](i,j,k) )/mRho[g](i,j,k);
               eigmax = loceig > eigmax ? loceig:eigmax;
               // dtGP = mCFL*mGridSize[g]/sqrt( loceig );
               // dtloc = dtloc < dtGP ? dtloc : dtGP;
            }
      float_sw4 ieigmax = 1/sqrt(eigmax);
      dtloc = dtloc < mCFL*mGridSize[g]*ieigmax ?
dtloc : mCFL*mGridSize[g]*ieigmax; } if( m_topography_exists ) { #define SQR(x) (x)*(x) // Curvilinear grid float_sw4 dtCurv; int g = mNumberOfGrids-1; float_sw4 la, mu, la2mu; int N=3, LDZ=1, INFO=0; char JOBZ='N', UPLO='L'; float_sw4 eigmax = -1; // always use double precision version of lapack routine, for simplicity double Amat[6], W[3], Z[1], WORK[9]; // do consider ghost points (especially the ghost line above the topography might be important) for (int k=m_kStart[g]; k<=m_kEnd[g]; k++) for (int j=m_jStart[g]; j<=m_jEnd[g]; j++) for (int i=m_iStart[g]; i<=m_iEnd[g]; i++) { la = mLambda[g](i,j,k); mu = mMu[g](i,j,k); // for( int a = 0 ; a < m_number_mechanisms ; a++ ) // { // la += mLambdaVE[g][a](i,j,k); // mu += mMuVE[g][a](i,j,k); // } la2mu = la + 2.*mu; float_sw4 jinv = 1/mJ(i,j,k); // A11 Amat[0] = -4*(SQR(mMetric(1,i,j,k))*la2mu + SQR(mMetric(1,i,j,k))*mu + SQR(mMetric(2,i,j,k))*la2mu + SQR(mMetric(3,i,j,k))*mu + SQR(mMetric(4,i,j,k))*mu)*jinv; // A21 = A12 Amat[1] = -4.*mMetric(2,i,j,k)*mMetric(3,i,j,k)*(mu+la)*jinv; // A31 = A13 Amat[2] = -4.*mMetric(2,i,j,k)*mMetric(4,i,j,k)*(mu+la)*jinv; // A22 Amat[3] = -4.*(SQR(mMetric(1,i,j,k))*mu + SQR(mMetric(1,i,j,k))*la2mu + + SQR(mMetric(2,i,j,k))*mu + SQR(mMetric(3,i,j,k))*la2mu + SQR(mMetric(4,i,j,k))*mu)*jinv; // A32 = A23 Amat[4] = -4.*mMetric(3,i,j,k)*mMetric(4,i,j,k)*(mu+la)*jinv; // A33 Amat[5] = -4.*(SQR(mMetric(1,i,j,k))*mu + SQR(mMetric(1,i,j,k))*mu + SQR(mMetric(2,i,j,k))*mu + SQR(mMetric(3,i,j,k))*mu + SQR(mMetric(4,i,j,k))*la2mu)*jinv; // calculate eigenvalues of symmetric matrix //#ifndef SW4_CUDA F77_FUNC(dspev,DSPEV)(JOBZ, UPLO, N, Amat, W, Z, LDZ, WORK, INFO); //#endif if (INFO != 0) { printf("ERROR: computeDT: dspev returned INFO = %i for grid point (%i, %i, %i)\n", INFO, i, j, k); printf("lambda = %e, mu = %e\n", la, mu); printf("Jacobian = %15.7g \n",mJ(i,j,k)); printf("Matrix = \n"); printf(" %15.7g %15.7g %15.7g \n",Amat[0],Amat[1],Amat[2]); printf(" %15.7g %15.7g %15.7g 
\n",Amat[1],Amat[3],Amat[4]); printf(" %15.7g %15.7g %15.7g \n",Amat[2],Amat[4],Amat[5]); MPI_Abort(MPI_COMM_WORLD, 1); } // eigenvalues in ascending order: W[0] < W[1] < W[2] if (W[0] >= 0.) { printf("ERROR: computeDT: determining eigenvalue is non-negative; W[0] = %e at curvilinear grid point (%i, %i, %i)\n", W[0], i, j, k); MPI_Abort(MPI_COMM_WORLD, 1); } float_sw4 loceig = (-W[0])/(4.*mRho[g](i,j,k)); eigmax = loceig > eigmax ? loceig:eigmax; } float_sw4 ieigmax = 1/sqrt(eigmax); dtCurv = mCFL*ieigmax; dtloc = dtloc<dtCurv ? dtloc: dtCurv; #undef SQR } // end if topographyExists() mDt = dtloc; // compute the global minima MPI_Allreduce( &dtloc, &mDt, 1, m_mpifloat, MPI_MIN, m_cartesian_communicator); if (!mQuiet && mVerbose >= 1 && m_myrank == 0 ) cout << " CFL= " << mCFL << " prel. time step=" << mDt << endl; if( mTimeIsSet ) { // constrain the dt based on the goal time mNumberOfTimeSteps = static_cast<int> ((mTmax - mTstart) / mDt + 0.5); mNumberOfTimeSteps = (mNumberOfTimeSteps==0)? 1: mNumberOfTimeSteps; // the resulting mDt could be slightly too large, because the numberOfTimeSteps is rounded to the nearest int mDt = (mTmax - mTstart) / mNumberOfTimeSteps; } } //----------------------------------------------------------------------- void EW::computeNearestGridPoint(int & a_i, int & a_j, int & a_k, int & a_g, // grid on which indices are located float_sw4 a_x, float_sw4 a_y, float_sw4 a_z) { bool breakLoop = false; for (int g = 0; g < mNumberOfGrids; g++) { if (a_z > m_zmin[g] || g == mNumberOfGrids-1) // We can not trust zmin for the curvilinear grid, since it doesn't mean anything { a_i = (int)floor(a_x/mGridSize[g])+1; if (a_x-((a_i-0.5)*mGridSize[g]) > 0.) (a_i)++; a_j = (int)floor(a_y/mGridSize[g])+1; if (a_y-((a_j-0.5)*mGridSize[g]) > 0.) (a_j)++; a_k = (int)floor((a_z-m_zmin[g])/mGridSize[g])+1; //Note: this component will be garbage for g=curvilinear grid if (a_z-(m_zmin[g]+(a_k-0.5)*mGridSize[g]) > 0.) 
(a_k)++; a_g = g ; breakLoop = true; } else if (a_z == m_zmin[g]) // testing for equality between doubles is kind of pointless... { // Point is located on top surface if g=finest grid, else the location is on // a grid/grid interface, and point is flagged as located on the finer (upper) grid. if (g == mNumberOfGrids-1) { a_i = (int)floor(a_x/mGridSize[g])+1; if (a_x-((a_i-0.5)*mGridSize[g]) > 0.) (a_i)++; a_j = (int)floor(a_y/mGridSize[g])+1; if (a_y-((a_j-0.5)*mGridSize[g]) > 0.) (a_j)++; a_k = 1; a_g = g; } else { a_i = (int)floor(a_x/mGridSize[g+1])+1; if (a_x-((a_i-0.5)*mGridSize[g+1]) > 0.) (a_i)++; a_j = (int)floor(a_y/mGridSize[g+1])+1; if (a_y-((a_j-0.5)*mGridSize[g+1]) > 0.) (a_j)++; a_k = (int)floor((a_z-m_zmin[g+1])/mGridSize[g+1])+1; // Here, I know I am on a grid line a_g = g+1 ; } breakLoop = true; } if (breakLoop) { break; } } // if z > zmax in grid 0 because the coordinate has not yet been corrected for topography, we simply set a_k to m_kEnd if (m_topography_exists && a_z >= m_global_zmax) { a_k = m_kEnd[0]; a_g = 0; } if (!m_topography_exists || (m_topography_exists && a_g < mNumberOfCartesianGrids)) { VERIFY2(a_i >= 1-m_ghost_points && a_i <= m_global_nx[a_g]+m_ghost_points, "Grid Error: i (" << a_i << ") is out of bounds: ( " << 1 << "," << m_global_nx[a_g] << ")" << " x,y,z = " << a_x << " " << a_y << " " << a_z); VERIFY2(a_j >= 1-m_ghost_points && a_j <= m_global_ny[a_g]+m_ghost_points, "Grid Error: j (" << a_j << ") is out of bounds: ( " << 1 << "," << m_global_ny[a_g] << ")" << " x,y,z = " << a_x << " " << a_y << " " << a_z); VERIFY2(a_k >= m_kStart[a_g] && a_k <= m_kEnd[a_g], "Grid Error: k (" << a_k << ") is out of bounds: ( " << 1 << "," << m_kEnd[a_g]-m_ghost_points << ")" << " x,y,z = " << a_x << " " << a_y << " " << a_z); } } //----------------------------------------------------------------------- bool EW::interior_point_in_proc(int a_i, int a_j, int a_g) { // NOT TAKING PARALLEL GHOST POINTS INTO ACCOUNT! 
// Determine if grid point with index (a_i, a_j) on grid a_g is an interior grid point on this processor bool retval = false; if (a_g >=0 && a_g < mNumberOfGrids){ retval = (a_i >= m_iStartInt[a_g]) && (a_i <= m_iEndInt[a_g]) && (a_j >= m_jStartInt[a_g]) && (a_j <= m_jEndInt[a_g]); } return retval; } //----------------------------------------------------------------------- bool EW::point_in_proc(int a_i, int a_j, int a_g) { // TAKING PARALLEL GHOST POINTS INTO ACCOUNT! // Determine if grid point with index (a_i, a_j) on grid a_g is a grid point on this processor bool retval = false; if (a_g >=0 && a_g < mNumberOfGrids){ retval = (a_i >= m_iStart[a_g] && a_i <= m_iEnd[a_g] && a_j >= m_jStart[a_g] && a_j <= m_jEnd[a_g] ); } return retval; } //----------------------------------------------------------------------- bool EW::point_in_proc_ext(int a_i, int a_j, int a_g) { // TAKING PARALLEL GHOST POINTS+EXTRA GHOST POINTS INTO ACCOUNT! // Determine if grid point with index (a_i, a_j) on grid a_g is a grid point on this processor bool retval = false; if (a_g >=0 && a_g < mNumberOfGrids){ retval = (a_i >= m_iStart[a_g]-m_ext_ghost_points && a_i <= m_iEnd[a_g]+m_ext_ghost_points && a_j >= m_jStart[a_g]-m_ext_ghost_points && a_j <= m_jEnd[a_g]+m_ext_ghost_points ); } return retval; } //----------------------------------------------------------------------- bool EW::is_onesided( int g, int side ) const { return m_onesided[g][side] == 1; } //----------------------------------------------------------------------- void EW::print_execution_time( double t1, double t2, string msg ) { // if( !mQuiet && proc_zero() ) if( m_myrank == 0 ) { double s = t2 - t1; int h = static_cast<int>(s/3600.0); s = s - h*3600; int m = static_cast<int>(s/60.0); s = s - m*60; cout << " Execution time, " << msg << " "; if( h > 1 ) cout << h << " hours "; else if( h > 0 ) cout << h << " hour "; if( m > 1 ) cout << m << " minutes "; else if( m > 0 ) cout << m << " minute "; if( s > 0 ) cout << s << " 
seconds " ; cout << endl; } } //----------------------------------------------------------------------- void EW::print_execution_times( double times[8] ) { double* time_sums =new double[8*m_nprocs]; MPI_Gather( times, 8, MPI_DOUBLE, time_sums, 8, MPI_DOUBLE, 0, MPI_COMM_WORLD ); bool printavgs = true; if( m_myrank == 0 ) { double avgs[8]={0,0,0,0,0,0,0,0}; for( int p= 0 ; p < m_nprocs ; p++ ) for( int c=0 ; c < 8 ; c++ ) avgs[c] += time_sums[8*p+c]; for( int c=0 ; c < 8 ; c++ ) avgs[c] /= m_nprocs; cout << "\n----------------------------------------" << endl; cout << " Execution time summary " << endl; // cout << "Processor Total BC total Step Image&Time series Comm.ref Comm.bndry BC impose " if( printavgs ) { cout << " Total BC comm BC phys Scheme Supergrid Forcing " <<endl; cout.setf(ios::left); cout.precision(5); cout.width(11); cout << avgs[7]; cout.width(11); cout << avgs[2]; cout.width(11); cout << avgs[3]; cout.width(11); cout << avgs[1]; cout.width(11); cout << avgs[4]; cout.width(11); cout << avgs[0]; cout.width(11); } else { cout << "Processor Total BC comm BC phys Scheme Supergrid Forcing " <<endl; cout.setf(ios::left); cout.precision(5); for( int p= 0 ; p < m_nprocs ; p++ ) { cout.width(11); cout << p; cout.width(11); cout << time_sums[8*p+7]; cout.width(11); cout << time_sums[8*p+2]; cout.width(11); cout << time_sums[8*p+3]; cout.width(11); cout << time_sums[8*p+1]; cout.width(11); cout << time_sums[8*p+4]; cout.width(11); cout << time_sums[8*p]; cout.width(11); // cout << time_sums[7*p+4]; // cout.width(11); // cout << time_sums[7*p+5]; // cout.width(11); // cout << time_sums[7*p+6]; cout << endl; } } // // << "|" << time_sums[p*7+3] << "|\t" << time_sums[p*7+1] << "|\t" << time_sums[p*7] // << "|\t " << time_sums[7*p+2] << "|\t" << time_sums[p*7+4] << "|\t" << time_sums[p*7+5] // << "|\t" << time_sums[7*p+6]<<endl; cout << "Clock tick is " << MPI_Wtick() << " seconds" << endl; // cout << "MPI_Wtime is "; // int flag; // bool wtime_is_global; // 
MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &wtime_is_global, &flag ); // if( wtime_is_global ) // cout << "global"; // else // cout << "local"; // cout << endl; cout << "----------------------------------------\n" << endl; cout.setf(ios::right); cout.precision(6); // Save timings to file string fname = mPath+"timings.bin"; int fd=open( fname.c_str(), O_TRUNC|O_CREAT|O_WRONLY, 0660 ); if( fd == -1 ) cout << "Error opening " << fname.c_str() << " for writing execution times" << endl; size_t nr=write(fd,&m_nprocs,sizeof(int)); if( nr != sizeof(int) ) cout << "Error writing nprocs on " << fname.c_str() << " nr = " << nr << " bytes" << endl; nr = write(fd, time_sums, 7*m_nprocs*sizeof(double)); if( nr != 7*m_nprocs*sizeof(double) ) cout << "Error writing time_sums on " << fname.c_str() << " nr = " << nr << " bytes" << endl; close(fd); } delete[] time_sums; } //----------------------------------------------------------------------- bool EW::check_for_match_on_cpu_gpu( vector<Sarray>& a_U, int verbose, string name ) { bool retval=false; if( m_cuobj->has_gpu() ) { retval = false; for( int g=0 ; g<mNumberOfGrids; g++ ) { size_t nn=a_U[g].check_match_cpu_gpu( m_cuobj, name ); retval = retval || nn > 0; if( nn > 0 && verbose == 1 ) { int cnan, inan, jnan, knan; a_U[g].check_match_cpu_gpu( m_cuobj, cnan, inan, jnan, knan, name ); cout << "grid " << g << " array " << name << " found " << nn << " dismatch. 
First dismatch at " << cnan << " " << inan << " " << jnan << " " << knan << endl; } } } return retval; } //----------------------------------------------------------------------- void EW::setup_materials() { // Point source test sets material directly in processTestPointSource if( !m_point_source_test ) { // Undefined q-factors, attenutation not yet implemented vector<Sarray> Qs(mNumberOfGrids), Qp(mNumberOfGrids); for( int b=0 ; b < m_mtrlblocks.size() ; b++ ) m_mtrlblocks[b]->set_material_properties( mRho, mMu, mLambda, Qs, Qp ); // Here mMu contains cs, and mLambda contains cp int g = mNumberOfGrids-1; extrapolateInZ( g, mRho[g], true, false ); extrapolateInZ( g, mLambda[g], true, false ); extrapolateInZ( g, mMu[g], true, false ); g = 0; extrapolateInZ( g, mRho[g], false, true ); extrapolateInZ( g, mLambda[g], false, true ); extrapolateInZ( g, mMu[g], false, true ); extrapolateInXY( mRho ); extrapolateInXY( mMu ); extrapolateInXY( mLambda ); // Convert mMu to mu, and mLambda to lambda convert_material_to_mulambda( ); } } //----------------------------------------------------------------------- void EW::convert_material_to_mulambda( ) { for( int g = 0 ; g < mNumberOfGrids; g++) { // On input, we have stored cs in MU, cp in Lambda // use mu = rho*cs*cs and lambda = rho*cp*cp - 2*mu for( int k = m_kStart[g] ; k <= m_kEnd[g]; k++ ) { for( int j = m_jStart[g] ; j <= m_jEnd[g]; j++ ) { for( int i = m_iStart[g] ; i <= m_iEnd[g] ; i++ ) { mMu[g](i,j,k) = mRho[g](i,j,k)*mMu[g](i,j,k)*mMu[g](i,j,k); mLambda[g](i,j,k) = mRho[g](i,j,k)*mLambda[g](i,j,k)*mLambda[g](i,j,k)-2*mMu[g](i,j,k); } } } } } //----------------------------------------------------------------------- void EW::extrapolateInXY( vector<Sarray>& field ) { for( int g= 0; g < mNumberOfGrids ; g++ ) { if( m_iStartInt[g] == 1 ) for( int k=m_kStart[g] ; k <= m_kEnd[g] ; k++ ) for( int j=m_jStart[g] ; j <= m_jEnd[g] ; j++ ) for( int i=m_iStart[g] ; i < 1 ; i++ ) { if( field[g](i,j,k) == -1 ) field[g](i,j,k) = 
field[g](1,j,k); } if( m_iEndInt[g] == m_global_nx[g] ) for( int k=m_kStart[g] ; k <= m_kEnd[g] ; k++ ) for( int j=m_jStart[g] ; j <= m_jEnd[g] ; j++ ) for( int i=m_iEndInt[g]+1 ; i <= m_iEnd[g] ; i++ ) { if( field[g](i,j,k) == -1 ) field[g](i,j,k) = field[g](m_iEndInt[g],j,k); } if( m_jStartInt[g] == 1 ) for( int k=m_kStart[g] ; k <= m_kEnd[g] ; k++ ) for( int j=m_jStart[g] ; j < 1 ; j++ ) for( int i=m_iStart[g] ; i <= m_iEnd[g] ; i++ ) { if( field[g](i,j,k) == -1 ) field[g](i,j,k) = field[g](i,1,k); } if( m_jEndInt[g] == m_global_ny[g] ) for( int k=m_kStart[g] ; k <= m_kEnd[g] ; k++ ) for( int j=m_jEndInt[g]+1 ; j <= m_jEnd[g] ; j++ ) for( int i=m_iStart[g] ; i <= m_iEnd[g] ; i++ ) { if( field[g](i,j,k) == -1 ) field[g](i,j,k) = field[g](i,m_jEndInt[g],k); } // corners not necessary to treat explicitly??? } } //----------------------------------------------------------------------- void EW::extrapolateInZ( int g, Sarray& field, bool lowk, bool highk ) { if( lowk ) for( int k=m_kStart[g] ; k < 1 ; k++ ) for( int j=m_jStart[g] ; j <= m_jEnd[g] ; j++ ) for( int i=m_iStart[g] ; i <= m_iEnd[g] ; i++ ) if( field(i,j,k) == -1 ) field(i,j,k) = field(i,j,1); if( highk ) for( int k=m_kEndInt[g]+1 ; k <= m_kEnd[g] ; k++ ) for( int j=m_jStart[g] ; j <= m_jEnd[g] ; j++ ) for( int i=m_iStart[g] ; i <= m_iEnd[g] ; i++ ) if( field(i,j,k) == -1 ) field(i,j,k) = field(i,j,m_kEndInt[g]); } //----------------------------------------------------------------------- void EW::getGlobalBoundingBox(float_sw4 bbox[6]) { bbox[0] = 0.; bbox[1] = m_global_xmax; bbox[2] = 0.; bbox[3] = m_global_ymax; bbox[4] = m_global_zmin; bbox[5] = m_global_zmax; } //----------------------------------------------------------------------- bool EW::getDepth( float_sw4 x, float_sw4 y, float_sw4 z, float_sw4 & depth ) { if( !m_topography_exists ) { depth = z; return true; } else { float_sw4 ztopo=0; if( find_topo_zcoord_owner(x,y,ztopo) ) { depth = z-ztopo; return true; } else return false; } } 
//----------------------------------------------------------------------- bool EW::interpolate_topography( float_sw4 q, float_sw4 r, float_sw4 & Z0, bool smoothed) { // Interpolate the smoothed or raw topography // Assume that (q,r) are indices in the curvilinear grid. // if (q,r) is on this processor (need a 2x2 interval in (i,j)-index space: // Return true and assign Z0 corresponding to (q,r) // Returns false if // 1) (q,r) is outside the global parameter domain (expanded by ghost points) // 2) (q,r) is not on this processor // NOTE: // The parameters are normalized such that 1 <= q <= Nx is the full domain (without ghost points), // 1 <= r <= Ny. // 0. No topography, easy case: if( !topographyExists() ) { Z0 = 0; return true; } // 1. Check that the point is inside the domain int g = mNumberOfGrids-1; float_sw4 h = mGridSize[g]; float_sw4 qMin = (float_sw4) (1- m_ghost_points); float_sw4 qMax = (float_sw4) (m_global_nx[g] + m_ghost_points); float_sw4 rMin = (float_sw4) (1- m_ghost_points); float_sw4 rMax = (float_sw4) (m_global_ny[g] + m_ghost_points); if (!(q >= qMin && q <= qMax && r >= rMin && r <= rMax)) { Z0 = 0; return false; } // 2. Compute elevation at (q,r) float_sw4 tau; // holds the elevation at (q,r). Recall that elevation=-z if (m_analytical_topo) { float_sw4 X0 = (q-1.0)*h; float_sw4 Y0 = (r-1.0)*h; float_sw4 igx2 = 1.0/(m_GaussianLx*m_GaussianLx); float_sw4 igy2 = 1.0/(m_GaussianLy*m_GaussianLy); tau = m_GaussianAmp*exp(-(X0-m_GaussianXc)*(X0-m_GaussianXc)*igx2 -(Y0-m_GaussianYc)*(Y0-m_GaussianYc)*igy2 ); } else { // 3.Compute nearest grid point int iNear = static_cast<int>(round(q)); int jNear = static_cast<int>(round(r)); bool smackOnTop = (fabs(iNear-q) < 1.e-9 && fabs(jNear-r)) < 1.e-9; if (smackOnTop && point_in_proc(iNear,jNear,g)) { // 3a. (q,r) coincides with a grid point. Get elevation at that point. if (smoothed) tau = mTopoGridExt(iNear,jNear,1); else tau = mTopo(iNear,jNear,1); } else { // 3b. (q,r) not at a grid point. 
Interpolate to get the elevation. // Nearest lower grid point: int i = static_cast<int>(floor(q)); int j = static_cast<int>(floor(r)); if( point_in_proc_ext(i-3,j-3,g) && point_in_proc_ext(i+4,j+4,g) ) { float_sw4 a6cofi[8], a6cofj[8]; gettopowgh( q-i, a6cofi ); gettopowgh( r-j, a6cofj ); tau = 0; for( int l=j-3 ; l <= j+4 ; l++ ) for( int k=i-3 ; k <= i+4 ; k++ ) tau += a6cofi[k-i+3]*a6cofj[l-j+3]*mTopoGridExt(k,l,1); } else { Z0 = 0; return false; } } } Z0 = -tau; return true; } //----------------------------------------------------------------------- void EW::gettopowgh( float_sw4 ai, float_sw4 wgh[8] ) const { float_sw4 pol = ai*ai*ai*ai*ai*ai*ai*(-251+135*ai+25*ai*ai- 33*ai*ai*ai+6*ai*ai*ai*ai)/720; wgh[0] = -1.0/60*ai + 1.0/180*ai*ai + 1.0/48*ai*ai*ai + 23.0/144*ai*ai*ai*ai - (17.0*ai + 223.0)*ai*ai*ai*ai*ai/720 - pol; wgh[1] = 3.0/20*ai -3.0/40*ai*ai -1.0/6*ai*ai*ai - 13.0/12*ai*ai*ai*ai + 97.0/45*ai*ai*ai*ai*ai + 1.0/6*ai*ai*ai*ai*ai*ai + 7*pol; wgh[2] = -0.75*ai +0.75*ai*ai+(13.0+155*ai)*ai*ai*ai/48 -103.0/16*ai*ai*ai*ai*ai - 121.0/240*ai*ai*ai*ai*ai*ai - 21*pol; wgh[3] = 1 - 49.0/36*ai*ai - 49.0/9*ai*ai*ai*ai+385.0/36*ai*ai*ai*ai*ai + 61.0/72*ai*ai*ai*ai*ai*ai + 35*pol; wgh[4] = 0.75*ai + 0.75*ai*ai - 13.0/48*ai*ai*ai + 89.0/16*ai*ai*ai*ai - 1537.0/144*ai*ai*ai*ai*ai - 41.0/48*ai*ai*ai*ai*ai*ai - 35*pol; wgh[5] = -3.0/20*ai - 3.0/40*ai*ai + 1.0/6*ai*ai*ai - 41.0/12*ai*ai*ai*ai + 6.4*ai*ai*ai*ai*ai + 31.0/60*ai*ai*ai*ai*ai*ai + 21*pol; wgh[6] = 1.0/60*ai + 1.0/180*ai*ai - 1.0/48*ai*ai*ai + 167.0/144*ai*ai*ai*ai - 1537.0/720*ai*ai*ai*ai*ai- 25.0/144*ai*ai*ai*ai*ai*ai - 7*pol; wgh[7] = -1.0/6*ai*ai*ai*ai + 11.0/36*ai*ai*ai*ai*ai + 1.0/40*ai*ai*ai*ai*ai*ai + pol; } //----------------------------------------------------------------------- void EW::grid_mapping( float_sw4 q, float_sw4 r, float_sw4 s, float_sw4& x, float_sw4& y, float_sw4& z ) { int g=mNumberOfGrids-1; float_sw4 h=mGridSize[g]; x = (q-1)*h; y = (r-1)*h; float_sw4 ztopo; if( 
interpolate_topography(q,r,ztopo,true) ) { int nz = m_global_nz[g]; float_sw4 izb = 1.0/(m_zetaBreak*(nz-1)); float_sw4 sa = (s-1)*izb; float_sw4 omsm = (1-sa); for( int l=2 ; l <= m_grid_interpolation_order ; l++ ) omsm *= (1-sa); if( sa >= 1 ) z = m_topo_zmax - (nz-s)*h; else z = m_topo_zmax - (nz-s)*h - omsm*(m_topo_zmax-(nz-1)*h-ztopo); } else z = -1e38; // double zloc = z; // MPI_Allreduce( &zloc, &z, 1, MPI_DOUBLE, MPI_MAX, m_cartesian_communicator ); } //----------------------------------------------------------------------- bool EW::invert_grid_mapping( int g, float_sw4 x, float_sw4 y, float_sw4 z, float_sw4& q, float_sw4& r, float_sw4& s ) { // Translates (x,y,z) to grid indices on grid g. // Successful only if (x,y) is in my processor, will return false if // the point is outside the processor. // bool success=true; q = x/mGridSize[g]+1; r = y/mGridSize[g]+1; if( g < mNumberOfCartesianGrids ) s = (z-m_zmin[g])/mGridSize[g]+1; else { // Grid is curvilinear // Maximum number of iterations, and error tolerance // for Newton iterations int maxit = 10; float_sw4 tol = 1e-9; float_sw4 zTopo; if( interpolate_topography(q, r, zTopo, true ) ) { int nz = m_global_nz[g]; float_sw4 h = mGridSize[g]; float_sw4 izb = 1.0/m_zetaBreak; // Elastic region top grid, sun is s normalized to 0 < sun < 1 float_sw4 sun = 1-(m_topo_zmax-z)/((nz-1)*h); if( sun >= m_zetaBreak ) // In uniform part of grid s = (nz-1)*sun+1; else { // Non-uniform, solve for s by Newton iteration int it = 0; float_sw4 numerr=tol+1; while( numerr > tol && it < maxit ) { float_sw4 omsm = (1-izb*sun); for( int l=2 ; l <= m_grid_interpolation_order-1 ; l++ ) omsm *= (1-izb*sun); float_sw4 fp = h*(nz-1) + izb*m_grid_interpolation_order*omsm*(m_topo_zmax - (nz-1)*h - zTopo); omsm *= (1-izb*sun); float_sw4 f = m_topo_zmax - (nz-1)*h*(1-sun) - omsm*(m_topo_zmax-(nz-1)*h-zTopo)-z; float_sw4 ds= f/fp; numerr = fabs(ds); sun = sun - ds; it++; } s = (nz-1)*sun+1; if( numerr >= tol ) { cout << 
"EW::invert_grid_mapping: WARNING no convergence err=" << numerr << " tol = " << tol << endl; s = -1e38; success = false; } } } else { // point not in processor, could not evaluate topography s = -1e38; success = false; } } return success; } //----------------------------------------------------------------------- void EW::computeGeographicCoord(float_sw4 x, float_sw4 y, float_sw4 & longitude, float_sw4 & latitude) { float_sw4 deg2rad = M_PI/180.0; float_sw4 phi = mGeoAz * deg2rad; latitude = mLatOrigin + (x*cos(phi) - y*sin(phi))/mMetersPerDegree; if (mConstMetersPerLongitude) { longitude = mLonOrigin + (x*sin(phi) + y*cos(phi))/(mMetersPerLongitude); } else { longitude = mLonOrigin + (x*sin(phi) + y*cos(phi))/(mMetersPerDegree*cos(latitude*deg2rad)); } } //----------------------------------------------------------------------- void EW::computeCartesianCoord(float_sw4 &x, float_sw4 &y, float_sw4 lon, float_sw4 lat) { // ----------------------------------------------------------------- // Compute the cartesian coordinate given the geographic coordinate // ----------------------------------------------------------------- // if( m_geoproj == 0 ) // // compute x and y { float_sw4 deg2rad = M_PI/180.0; float_sw4 phi = mGeoAz * deg2rad; // x = mMetersPerDegree*(cos(phi)*(lat-mLatOrigin) + cos(lat*deg2rad)*(lon-mLonOrigin)*sin(phi)); // y = mMetersPerDegree*(-sin(phi)*(lat-mLatOrigin) + cos(lat*deg2rad)*(lon-mLonOrigin)*cos(phi)); if (mConstMetersPerLongitude) { x = mMetersPerDegree*cos(phi)*(lat-mLatOrigin) + mMetersPerLongitude*(lon-mLonOrigin)*sin(phi); y = mMetersPerDegree*(-sin(phi))*(lat-mLatOrigin) + mMetersPerLongitude*(lon-mLonOrigin)*cos(phi); } else { x = mMetersPerDegree*(cos(phi)*(lat-mLatOrigin) + cos(lat*deg2rad)*(lon-mLonOrigin)*sin(phi)); y = mMetersPerDegree*(-sin(phi)*(lat-mLatOrigin) + cos(lat*deg2rad)*(lon-mLonOrigin)*cos(phi)); } } // else // m_geoproj->computeCartesianCoord(x,y,lon,lat); } 
//----------------------------------------------------------------------- void EW::get_utc( int utc[7] ) const { for( int c=0 ; c < 7 ; c++ ) utc[c] = m_utc0[c]; } //----------------------------------------------------------------------- void EW::extractRecordData(TimeSeries::receiverMode mode, int i0, int j0, int k0, int g0, vector<float_sw4> &uRec, vector<Sarray> &Um2, vector<Sarray> &U) { if (mode == TimeSeries::Displacement) { uRec.resize(3); uRec[0] = U[g0](1, i0, j0, k0); uRec[1] = U[g0](2, i0, j0, k0); uRec[2] = U[g0](3, i0, j0, k0); } else if (mode == TimeSeries::Velocity) { uRec.resize(3); uRec[0] = (U[g0](1, i0, j0, k0) - Um2[g0](1, i0, j0, k0))/(2*mDt); uRec[1] = (U[g0](2, i0, j0, k0) - Um2[g0](2, i0, j0, k0))/(2*mDt); uRec[2] = (U[g0](3, i0, j0, k0) - Um2[g0](3, i0, j0, k0))/(2*mDt); } else if(mode == TimeSeries::Div) { uRec.resize(1); if (g0 < mNumberOfCartesianGrids) // must be a Cartesian grid { // int i=m_i0, j=m_j0, k=m_k0, g=m_grid0; float_sw4 factor = 1.0/(2*mGridSize[g0]); uRec[0] = ((U[g0](1,i0+1, j0, k0) - U[g0](1,i0-1, j0, k0)+ U[g0](2,i0, j0+1, k0) - U[g0](2,i0, j0-1, k0)+ U[g0](3,i0, j0, k0+1) - U[g0](3,i0, j0, k0-1))*factor); } else // must be curvilinear { // int i=m_i0, j=m_j0, k=m_k0, g=m_grid0; float_sw4 factor = 0.5/sqrt(mJ(i0,j0,k0)); uRec[0] = ( ( mMetric(1,i0,j0,k0)*(U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))+ mMetric(1,i0,j0,k0)*(U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))+ mMetric(2,i0,j0,k0)*(U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1))+ mMetric(3,i0,j0,k0)*(U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1))+ mMetric(4,i0,j0,k0)*(U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1)) )*factor); } } // end div else if(mode == TimeSeries::Curl) { uRec.resize(3); if (g0 < mNumberOfCartesianGrids) // must be a Cartesian grid { // int i=m_i0, j=m_j0, k=m_k0, g=m_grid0; float_sw4 factor = 1.0/(2*mGridSize[g0]); float_sw4 duydx = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0))*factor; float_sw4 duzdx = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0))*factor; 
float_sw4 duxdy = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0))*factor; float_sw4 duzdy = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0))*factor; float_sw4 duxdz = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1))*factor; float_sw4 duydz = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1))*factor; // if( m_xycomponent ) // { uRec[0] = ( duzdy-duydz ); uRec[1] = ( duxdz-duzdx ); uRec[2] = ( duydx-duxdy ); // } // else // { // float_sw4 uns = m_thynrm*(duzdy-duydz)-m_thxnrm*(duxdz-duzdx); // float_sw4 uew = m_salpha*(duzdy-duydz)+m_calpha*(duxdz-duzdx); // mRecordedUX.push_back( uew ); // mRecordedUY.push_back( uns ); // mRecordedUZ.push_back( -(duydx-duxdy) ); // } } else // must be curvilinear { // int i=m_i0, j=m_j0, k=m_k0, g=m_grid0; float_sw4 factor = 0.5/sqrt(mJ(i0,j0,k0)); float_sw4 duxdq = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0)); float_sw4 duydq = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0)); float_sw4 duzdq = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0)); float_sw4 duxdr = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0)); float_sw4 duydr = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0)); float_sw4 duzdr = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0)); float_sw4 duxds = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1)); float_sw4 duyds = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1)); float_sw4 duzds = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1)); float_sw4 duzdy = mMetric(1,i0,j0,k0)*duzdr+mMetric(3,i0,j0,k0)*duzds; float_sw4 duydz = mMetric(4,i0,j0,k0)*duyds; float_sw4 duxdz = mMetric(4,i0,j0,k0)*duxds; float_sw4 duzdx = mMetric(1,i0,j0,k0)*duzdq+mMetric(2,i0,j0,k0)*duzds; float_sw4 duydx = mMetric(1,i0,j0,k0)*duydq+mMetric(2,i0,j0,k0)*duyds; float_sw4 duxdy = mMetric(1,i0,j0,k0)*duxdr+mMetric(3,i0,j0,k0)*duxds; // if( m_xycomponent ) // { uRec[0] = (duzdy-duydz)*factor; uRec[1] = (duxdz-duzdx)*factor; uRec[2] = (duydx-duxdy)*factor; // } // else // { // float_sw4 uns = m_thynrm*(duzdy-duydz)-m_thxnrm*(duxdz-duzdx); // float_sw4 uew = m_salpha*(duzdy-duydz)+m_calpha*(duxdz-duzdx); // 
mRecordedUX.push_back( uew*factor ); // mRecordedUY.push_back( uns*factor ); // mRecordedUZ.push_back( -(duydx-duxdy)*factor ); // } } } // end Curl else if(mode == TimeSeries::Strains ) { uRec.resize(6); if (g0 < mNumberOfCartesianGrids) // must be a Cartesian grid { // int i=m_i0, j=m_j0, k=m_k0, g=m_grid0; float_sw4 factor = 1.0/(2*mGridSize[g0]); float_sw4 duydx = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0))*factor; float_sw4 duzdx = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0))*factor; float_sw4 duxdy = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0))*factor; float_sw4 duzdy = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0))*factor; float_sw4 duxdz = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1))*factor; float_sw4 duydz = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1))*factor; float_sw4 duxdx = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))*factor; float_sw4 duydy = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))*factor; float_sw4 duzdz = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1))*factor; uRec[0] = ( duxdx ); uRec[1] = ( duydy ); uRec[2] = ( duzdz ); uRec[3] = ( 0.5*(duydx+duxdy) ); uRec[4] = ( 0.5*(duzdx+duxdz) ); uRec[5] = ( 0.5*(duydz+duzdy) ); } else // must be curvilinear { // int i=m_i0, j=m_j0, k0=m_k00, g0=m_grid0; float_sw4 factor = 0.5/sqrt(mJ(i0,j0,k0)); float_sw4 duxdq = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0)); float_sw4 duydq = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0)); float_sw4 duzdq = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0)); float_sw4 duxdr = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0)); float_sw4 duydr = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0)); float_sw4 duzdr = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0)); float_sw4 duxds = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1)); float_sw4 duyds = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1)); float_sw4 duzds = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1)); float_sw4 duzdy = (mMetric(1,i0,j0,k0)*duzdr+mMetric(3,i0,j0,k0)*duzds)*factor; float_sw4 duydz = (mMetric(4,i0,j0,k0)*duyds)*factor; float_sw4 duxdz = 
(mMetric(4,i0,j0,k0)*duxds)*factor; float_sw4 duzdx = (mMetric(1,i0,j0,k0)*duzdq+mMetric(2,i0,j0,k0)*duzds)*factor; float_sw4 duydx = (mMetric(1,i0,j0,k0)*duydq+mMetric(2,i0,j0,k0)*duyds)*factor; float_sw4 duxdy = (mMetric(1,i0,j0,k0)*duxdr+mMetric(3,i0,j0,k0)*duxds)*factor; float_sw4 duxdx = ( mMetric(1,i0,j0,k0)*(U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))+ mMetric(2,i0,j0,k0)*(U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1)) )*factor; float_sw4 duydy = ( mMetric(1,i0,j0,k0)*(U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))+ mMetric(3,i0,j0,k0)*(U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1)) )*factor; float_sw4 duzdz = ( mMetric(4,i0,j0,k0)*(U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1)) )*factor; uRec[0] = ( duxdx ); uRec[1] = ( duydy ); uRec[2] = ( duzdz ); uRec[3] = ( 0.5*(duydx+duxdy) ); uRec[4] = ( 0.5*(duzdx+duxdz) ); uRec[5] = ( 0.5*(duydz+duzdy) ); } } // end Strains else if(mode == TimeSeries::DisplacementGradient ) { uRec.resize(9); if (g0 < mNumberOfCartesianGrids) // must be a Cartesian grid { // int i=m_i0, j=m_j0, k=m_k0, g=m_grid0; float_sw4 factor = 1.0/(2*mGridSize[g0]); float_sw4 duydx = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0))*factor; float_sw4 duzdx = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0))*factor; float_sw4 duxdy = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0))*factor; float_sw4 duzdy = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0))*factor; float_sw4 duxdz = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1))*factor; float_sw4 duydz = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1))*factor; float_sw4 duxdx = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))*factor; float_sw4 duydy = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))*factor; float_sw4 duzdz = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1))*factor; uRec[0] = duxdx; uRec[1] = duxdy; uRec[2] = duxdz; uRec[3] = duydx; uRec[4] = duydy; uRec[5] = duydz; uRec[6] = duzdx; uRec[7] = duzdy; uRec[8] = duzdz; } else // must be curvilinear { // int i=m_i0, j=m_j0, k0=m_k00, g0=m_grid0; float_sw4 factor = 0.5/sqrt(mJ(i0,j0,k0)); float_sw4 
duxdq = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0)); // continuation: central differences in curvilinear index space (q,r,s)
float_sw4 duydq = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0));
float_sw4 duzdq = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0));
float_sw4 duxdr = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0));
float_sw4 duydr = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0));
float_sw4 duzdr = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0));
float_sw4 duxds = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1));
float_sw4 duyds = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1));
float_sw4 duzds = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1));
// Map parameter-space differences to Cartesian derivatives via mMetric entries 1..4.
float_sw4 duzdy = (mMetric(1,i0,j0,k0)*duzdr+mMetric(3,i0,j0,k0)*duzds)*factor;
float_sw4 duydz = (mMetric(4,i0,j0,k0)*duyds)*factor;
float_sw4 duxdz = (mMetric(4,i0,j0,k0)*duxds)*factor;
float_sw4 duzdx = (mMetric(1,i0,j0,k0)*duzdq+mMetric(2,i0,j0,k0)*duzds)*factor;
float_sw4 duydx = (mMetric(1,i0,j0,k0)*duydq+mMetric(2,i0,j0,k0)*duyds)*factor;
float_sw4 duxdy = (mMetric(1,i0,j0,k0)*duxdr+mMetric(3,i0,j0,k0)*duxds)*factor;
float_sw4 duxdx = ( mMetric(1,i0,j0,k0)*(U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))+ mMetric(2,i0,j0,k0)*(U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1)) )*factor;
float_sw4 duydy = ( mMetric(1,i0,j0,k0)*(U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))+ mMetric(3,i0,j0,k0)*(U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1)) )*factor;
float_sw4 duzdz = ( mMetric(4,i0,j0,k0)*(U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1)) )*factor;
// Store the full 3x3 displacement gradient, row-major by component then direction.
uRec[0] = duxdx; uRec[1] = duxdy; uRec[2] = duxdz;
uRec[3] = duydx; uRec[4] = duydy; uRec[5] = duydz;
uRec[6] = duzdx; uRec[7] = duzdy; uRec[8] = duzdz;
}
} // end DisplacementGradient
return;
}

//-----------------------------------------------------------------------
// Install the default boundary conditions: supergrid (absorbing) on all six
// sides of the domain, except side 4 (low-z) which is a free surface.
void EW::default_bcs( )
{
   for( int side=0 ; side < 6 ; side++ )
      mbcGlobalType[side] = bSuperGrid;
   mbcGlobalType[4] = bStressFree; // low-z is normally free surface
}

//-----------------------------------------------------------------------
// Define an analytical Gaussian-hill topography with amplitude `amp`, widths
// (Lx,Ly) and center (x0,y0). Signature continues on the next chunk line.
void EW::buildGaussianHillTopography(float_sw4 amp, float_sw4 Lx, float_sw4 Ly, float_sw4 x0,
float_sw4 y0)
{
   // Record the analytical Gaussian-hill parameters and evaluate the hill on
   // both the top-grid topography array (mTopo) and the extended grid
   // (mTopoGridExt). Positive elevation is stored directly; z points down.
   if (mVerbose >= 1 && (m_myrank == 0 ) )
      cout << "***inside buildGaussianHillTopography***"<< endl;
#define SQR(x) (x)*(x)
   int topLevel = mNumberOfGrids-1;
   float_sw4 x, y;
   // copy data
   m_analytical_topo = true;
   //  m_analytical_topo = false;
   m_GaussianAmp = amp;
   m_GaussianLx = Lx;
   m_GaussianLy = Ly;
   m_GaussianXc = x0;
   m_GaussianYc = y0;
   for (int i = m_iStart[topLevel]; i <= m_iEnd[topLevel]; ++i)
      for (int j = m_jStart[topLevel]; j <= m_jEnd[topLevel]; ++j)
      {
         // Grid index -> physical coordinate (1-based indexing, spacing of top grid).
         x = (i-1)*mGridSize[topLevel];
         y = (j-1)*mGridSize[topLevel];
         // positive topography is up (negative z)
         mTopo(i,j,1) = m_GaussianAmp*exp(-SQR((x-m_GaussianXc)/m_GaussianLx) -SQR((y-m_GaussianYc)/m_GaussianLy));
      }
   for (int i = mTopoGridExt.m_ib ; i <= mTopoGridExt.m_ie ; ++i)
      for (int j = mTopoGridExt.m_jb ; j <= mTopoGridExt.m_je; ++j)
      {
         x = (i-1)*mGridSize[topLevel];
         y = (j-1)*mGridSize[topLevel];
         // positive topography is up (negative z)
         mTopoGridExt(i,j,1) = m_GaussianAmp*exp(-SQR((x-m_GaussianXc)/m_GaussianLx) -SQR((y-m_GaussianYc)/m_GaussianLy));
      }
#undef SQR
}

//-----------------------------------------------------------------------
// Compute the global min and max z-coordinate of the topography surface,
// reduced over all MPI ranks. With no topography both outputs are 0.
void EW::compute_minmax_topography( float_sw4& topo_zmin, float_sw4& topo_zmax )
{
   if( m_topography_exists )
   {
      int g = mNumberOfGrids-1;
      int i=m_iStart[g], j=m_jEnd[g];
      // The z-coordinate points downwards, so positive topography (above sea level)
      // gets negative z-values
      float_sw4 zMinLocal, zMaxLocal;
      zMaxLocal = zMinLocal = -mTopoGridExt(i,j,1);
      int imin = mTopoGridExt.m_ib;
      int imax = mTopoGridExt.m_ie;
      int jmin = mTopoGridExt.m_jb;
      int jmax = mTopoGridExt.m_je;
      for (i= imin ; i<=imax ; i++)
         for (j=jmin; j<=jmax ; j++)
         {
            if (-mTopoGridExt(i,j,1) > zMaxLocal)
            {
               zMaxLocal = -mTopoGridExt(i,j,1);
            }
            if (-mTopoGridExt(i,j,1) < zMinLocal)
            {
               zMinLocal = -mTopoGridExt(i,j,1);
            }
         }
      // Reduce the per-rank extrema to global extrema.
      MPI_Allreduce( &zMinLocal, &topo_zmin, 1, m_mpifloat, MPI_MIN, m_cartesian_communicator);
      MPI_Allreduce( &zMaxLocal, &topo_zmax, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator);
   }
   else
   {
topo_zmin = topo_zmax = 0; } } //----------------------------------------------------------------------- void EW::generate_grid() { // Generate grid on domain: topography <= z <= zmax, // The 2D grid on z=zmax, is given by ifirst <= i <= ilast, jfirst <= j <= jlast // spacing h. if (!m_topography_exists ) return; // m_grid_interpolation_order = a_order; if (mVerbose >= 1 && (m_myrank==0) ) cout << "***inside generate_grid***"<< endl; // get the size from the top Cartesian grid int g = mNumberOfCartesianGrids-1; int ifirst = m_iStart[g]; int ilast = m_iEnd[g]; int jfirst = m_jStart[g]; int jlast = m_jEnd[g]; float_sw4 h = mGridSize[g]; // grid size must agree with top cartesian grid float_sw4 zMaxCart = m_zmin[g]; // bottom z-level for curvilinear grid int i, j; int gTop = mNumberOfGrids-1; int Nz = m_kEnd[gTop] - m_ghost_points; if(mVerbose > 4 && (m_myrank == 0 ) ) { printf("generate_grid: Number of grid points in curvilinear grid = %i, kStart = %i, kEnd = %i\n", Nz, m_kStart[gTop], m_kEnd[gTop]); } // generate the grid by calling the curvilinear mapping function float_sw4 X0, Y0, Z0; int k; for (k=m_kStart[gTop]; k<=m_kEnd[gTop]; k++) for (j=m_jStart[gTop]; j<=m_jEnd[gTop]; j++) for (i=m_iStart[gTop]; i<=m_iEnd[gTop]; i++) { grid_mapping((float_sw4) i, (float_sw4) j, (float_sw4) k, X0, Y0, Z0); mX(i,j,k) = X0; mY(i,j,k) = Y0; mZ(i,j,k) = Z0; } communicate_array( mZ, gTop ); // calculate min and max((mZ(i,j,k)-mZ(i,j,k-1))/h) for k=Nz k = Nz; float_sw4 hRatio; float_sw4 mZmin = 1.0e9, mZmax=0; for (j=m_jStart[gTop]; j<=m_jEnd[gTop]; j++) for (i=m_iStart[gTop]; i<=m_iEnd[gTop]; i++) { hRatio = (mZ(i,j,k)-mZ(i,j,k-1))/mGridSize[gTop]; if (hRatio < mZmin) mZmin = hRatio; if (hRatio > mZmax) mZmax = hRatio; } float_sw4 zMinGlobal, zMaxGlobal; MPI_Allreduce( &mZmin, &zMinGlobal, 1, m_mpifloat, MPI_MIN, m_cartesian_communicator); MPI_Allreduce( &mZmax, &zMaxGlobal, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator); if(mVerbose > 3 && (m_myrank == 0) ) { 
printf("Curvilinear/Cartesian interface (k=Nz-1): Min grid size ratio - 1 = %e, max ratio z - 1 = %e, top grid # = %i\n", zMinGlobal-1., zMaxGlobal-1., gTop); } } //--------------------------------------------------------- void EW::setup_metric() { if (!m_topography_exists ) return; if (mVerbose >= 1 && (m_myrank == 0)) cout << "***inside setup_metric***"<< endl; int g=mNumberOfGrids-1; int Bx=m_iStart[g]; int By=m_jStart[g]; int Bz=m_kStart[g]; int Nx=m_iEnd[g]; int Ny=m_jEnd[g]; int Nz=m_kEnd[g]; if( m_analytical_topo && m_use_analytical_metric ) { // Gaussian hill topography, analytical expressions for metric derivatives. int nxg = m_global_nx[g]; int nyg = m_global_ny[g]; int nzg = m_global_nz[g]; float_sw4 h= mGridSize[g]; float_sw4 zmax = m_zmin[g-1] - (nzg-1)*h*(1-m_zetaBreak); if( m_corder ) metricexgh_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g], m_global_nz[g], mX.c_ptr(), mY.c_ptr(), mZ.c_ptr(), mMetric.c_ptr(), mJ.c_ptr(), m_grid_interpolation_order, m_zetaBreak, zmax, m_GaussianAmp, m_GaussianXc, m_GaussianYc, m_GaussianLx, m_GaussianLy ); else metricexgh( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g], m_global_nz[g], mX.c_ptr(), mY.c_ptr(), mZ.c_ptr(), mMetric.c_ptr(), mJ.c_ptr(), m_grid_interpolation_order, m_zetaBreak, zmax, m_GaussianAmp, m_GaussianXc, m_GaussianYc, m_GaussianLx, m_GaussianLy ); } else { int ierr=0; if( m_corder ) ierr = metric_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g], mX.c_ptr(), mY.c_ptr(), mZ.c_ptr(), mMetric.c_ptr(), mJ.c_ptr() ); else ierr = metric( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g], mX.c_ptr(), mY.c_ptr(), mZ.c_ptr(), mMetric.c_ptr(), mJ.c_ptr() ); CHECK_INPUT(ierr==0, "Problems calculating the metric coefficients"); } communicate_array( mMetric, mNumberOfGrids-1 ); communicate_array( mJ, mNumberOfGrids-1 ); // if( m_analytical_topo && !m_use_analytical_metric && mVerbose > 3 ) // // Test metric 
// derivatives if available   (NOTE(review): tail of the commented-out
// metric_derivatives_test call above; "//" markers restored after re-flow)
// metric_derivatives_test( );
float_sw4 minJ, maxJ;
gridinfo( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g], mMetric.c_ptr(), mJ.c_ptr(), minJ, maxJ );
// Reduce the per-rank Jacobian extrema to global values and report them.
float_sw4 minJglobal, maxJglobal;
MPI_Allreduce( &minJ, &minJglobal, 1, m_mpifloat, MPI_MIN, m_cartesian_communicator);
MPI_Allreduce( &maxJ, &maxJglobal, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator);
if (mVerbose>3 && (m_myrank == 0))
   printf("*** Jacobian of metric: minJ = %e maxJ = %e\n", minJglobal, maxJglobal);
}

//-----------------------------------------------------------------------
// Evaluate the topography elevation Ztopo at physical point (X,Y) on this
// rank. Returns false (and sets Ztopo=0) if the interpolation fails, e.g.
// when the point is outside this rank's part of the grid.
bool EW::find_topo_zcoord_owner( float_sw4 X, float_sw4 Y, float_sw4& Ztopo )
{
   bool success = true;
   if ( m_topography_exists )
   {
      float_sw4 h = mGridSize[mNumberOfGrids-1];
      // Convert physical (X,Y) to 1-based grid coordinates (q,r).
      float_sw4 q, r;
      q = X/h + 1.0;
      r = Y/h + 1.0;
      // evaluate elevation of topography on the grid
      if (!interpolate_topography(q, r, Ztopo, true))
      {
         cerr << "Unable to evaluate topography at" << " X= " << X << " Y= " << Y << endl;
         cerr << "Setting topography to ZERO" << endl;
         Ztopo = 0;
         success = false;
      }
   }
   else
   {
      Ztopo = 0; // no topography
   }
   return success;
}

//-----------------------------------------------------------------------
// Same as find_topo_zcoord_owner, but distributes the result to every rank:
// non-owning ranks contribute -1e38 and an MPI max-reduction picks the value
// from the owning rank. Returns false if no rank could evaluate the point.
bool EW::find_topo_zcoord_all( float_sw4 X, float_sw4 Y, float_sw4& Ztopo )
{
   bool success = true;
   if (m_topography_exists )
   {
      float_sw4 h = mGridSize[mNumberOfGrids-1];
      float_sw4 q, r;
      q = X/h + 1.0;
      r = Y/h + 1.0;
      float_sw4 Ztopoloc;
      // evaluate elevation of topography on the grid
      if (!interpolate_topography(q, r, Ztopoloc, true))
      {
         Ztopoloc = -1e38; // sentinel: this rank does not own the point
      }
      MPI_Allreduce( &Ztopoloc, &Ztopo, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator );
      success = Ztopo > -1e38;
   }
   else
   {
      Ztopo = 0; // no topography
      success = true;
   }
   return success;
}

//-----------------------------------------------------------------------
// Comparator for sorting grid point sources by their precomputed sort key;
// used by EW::sort_grid_point_sources via std::sort.
bool less_than( GridPointSource* ptsrc1, GridPointSource* ptsrc2 )
{
   return ptsrc1->m_key < ptsrc2->m_key;
}
//----------------------------------------------------------------------- void EW::sort_grid_point_sources() { size_t* gptr = new size_t[mNumberOfGrids]; gptr[0] = 0; for(int g=0 ; g < mNumberOfGrids-1 ; g++ ) { gptr[g+1] = gptr[g] + static_cast<size_t>((m_iEnd[g]-m_iStart[g]+1))* (m_jEnd[g]-m_jStart[g]+1)*(m_kEnd[g]-m_kStart[g]+1); } size_t* ni = new size_t[mNumberOfGrids]; size_t* nij = new size_t[mNumberOfGrids]; for(int g=0 ; g < mNumberOfGrids ; g++ ) { ni[g] = (m_iEnd[g]-m_iStart[g]+1); nij[g] = ni[g]*(m_jEnd[g]-m_jStart[g]+1); } for( int s=0 ; s < m_point_sources.size() ; s++ ) { int g = m_point_sources[s]->m_grid; size_t key = gptr[g] + (m_point_sources[s]->m_i0-m_iStart[g]) + ni[g]*(m_point_sources[s]->m_j0-m_jStart[g]) + nij[g]*(m_point_sources[s]->m_k0-m_kStart[g]); m_point_sources[s]->set_sort_key(key); } delete[] gptr; delete[] ni; delete[] nij; std::sort(m_point_sources.begin(), m_point_sources.end(), less_than ); // set up array detecting sources belonging to idential points m_identsources.resize(1); m_identsources[0] = 0; int k = 0; while( m_identsources[k] < m_point_sources.size() ) { int m = m_identsources[k]; size_t key = m_point_sources[m]->m_key; while( m+1 < m_point_sources.size() && m_point_sources[m+1]->m_key == key ) m++; m_identsources.push_back(m+1); k++; } // Test int nrsrc =m_point_sources.size(); int nrunique = m_identsources.size()-1; int nrsrctot, nruniquetot; MPI_Reduce( &nrsrc, &nrsrctot, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD ); MPI_Reduce( &nrunique, &nruniquetot, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD ); if( m_myrank == 0 ) { cout << "number of grid point sources = " << nrsrctot << endl; cout << "number of unique g.p. sources = " << nruniquetot << endl; } } //----------------------------------------------------------------------- void EW::copy_point_sources_to_gpu() { // new code, redefined dev_point_sources to be a GridPointSource* to // be able to copy the sources to device as an array instead of copying // them one by one. 
   // Device buffer for all sources; failures are logged but do not abort here.
   hipError_t retcode=hipMalloc( (void**)&dev_point_sources, sizeof(GridPointSource)*m_point_sources.size());
   if( hipSuccess != retcode )
      cout << "Error EW::copy_point_sources_to_gpu, hipMalloc, 1, retcode = " << hipGetErrorString(retcode) << endl;
   // Stage the sources contiguously on the host so one memcpy suffices.
   GridPointSource* hsources = new GridPointSource[m_point_sources.size()];
   for( int s=0 ; s < m_point_sources.size() ; s++ )
      hsources[s] = *(m_point_sources[s]);
   retcode = hipMemcpy( dev_point_sources, hsources, m_point_sources.size()*sizeof(GridPointSource), hipMemcpyHostToDevice );
   if( hipSuccess != retcode )
      cout << "Error EW::copy_point_sources_to_gpu, hipMemcpy, 1, retcode = " << hipGetErrorString(retcode) << endl;
   // Same for the identical-point grouping index array.
   retcode = hipMalloc( (void**)&dev_identsources, sizeof(int)*m_identsources.size() );
   if( hipSuccess != retcode )
      cout << "Error EW::copy_point_sources_to_gpu, hipMalloc, 2, retcode = " << hipGetErrorString(retcode) << endl;
   retcode = hipMemcpy( dev_identsources, &m_identsources[0], sizeof(int)*m_identsources.size(), hipMemcpyHostToDevice );
   if( hipSuccess != retcode )
      cout << "Error EW::copy_point_sources_to_gpu, hipMemcpy, 2, retcode = " << hipGetErrorString(retcode) << endl;
   delete[] hsources;
}

//-----------------------------------------------------------------------
// Terminate with a diagnostic if a HIP runtime call did not return
// hipSuccess. commandName/fileName/line identify the failing call site.
void EW::CheckCudaCall(hipError_t command, const char * commandName, const char * fileName, int line)
{
   if (command != hipSuccess)
   {
      fprintf(stderr, "Error: CUDA result \"%s\" for call \"%s\" in file \"%s\" at line %d. Terminating...\n", hipGetErrorString(command), commandName, fileName, line);
      exit(1);
   }
}
convolution_sgemm_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack1to4_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { #if NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD if (ncnn::cpu_support_arm_asimddp()) { void im2col_sgemm_pack1to4_int8_neon_arm82dot(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_pack1to4_int8_neon_arm82dot(bottom_im2col, top_blob, kernel, opt); return; } #endif // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; #if __aarch64__ #if __ARM_FEATURE_DOTPROD if (inch >= 8) { if (size >= 16) tmp.create(16 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch 
/ 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator); } else if (inch >= 4) { if (size >= 16) tmp.create(16 * maxk, inch / 4 + inch % 4, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); } else { if (size >= 16) tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 1, opt.workspace_allocator); } #else // __ARM_FEATURE_DOTPROD if (inch >= 8) { if (size >= 4) tmp.create(4 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator); } else if (inch >= 4) { if (size >= 4) tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, 
opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); } else { if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); } #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ if (inch >= 8) { if (size >= 2) tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator); } else if (inch >= 4) { if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); } else { if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); } #endif // __aarch64__ { #if __aarch64__ #if __ARM_FEATURE_DOTPROD int nn_size = size >> 4; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char* tmpptr = tmp.channel(i / 16); int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed 
char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.16b}, [%0] \n" "ld1 {v1.16b}, [%1] \n" "ld1 {v2.16b}, [%2] \n" "ld1 {v3.16b}, [%3] \n" "ld1 {v4.16b}, [%4] \n" "ld1 {v5.16b}, [%5] \n" "ld1 {v6.16b}, [%6] \n" "ld1 {v7.16b}, [%7] \n" "st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%8], #64 \n" "st4 {v4.16b, v5.16b, v6.16b, v7.16b}, [%8], #64 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.16b}, [%0] \n" "ld1 {v1.16b}, [%1] \n" "ld1 {v2.16b}, [%2] \n" "ld1 {v3.16b}, [%3] \n" "st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%4], #64 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(tmpptr) // %4 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : 
"0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.8b}, [%0] \n" "ld1 {v1.8b}, [%1] \n" "ld1 {v2.8b}, [%2] \n" "ld1 {v3.8b}, [%3] \n" "ld1 {v4.8b}, [%4] \n" "ld1 {v5.8b}, [%5] \n" "ld1 {v6.8b}, [%6] \n" "ld1 {v7.8b}, [%7] \n" "st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%8], #32 \n" "st4 {v4.8b, v5.8b, v6.8b, v7.8b}, [%8], #32 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + 
i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.8b}, [%0] \n" "ld1 {v1.8b}, [%1] \n" "ld1 {v2.8b}, [%2] \n" "ld1 {v3.8b}, [%3] \n" "st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(tmpptr) // %4 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else // __ARM_FEATURE_DOTPROD int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif // __ARM_FEATURE_DOTPROD #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char* tmpptr = tmp.channel(i / 4); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 
7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img0[3]; tmpptr[5] = img1[3]; tmpptr[6] = img2[3]; tmpptr[7] = img3[3]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img4[2]; tmpptr[1] = img5[2]; tmpptr[2] = img6[2]; tmpptr[3] = img7[2]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img4[2]; tmpptr[5] = img5[2]; tmpptr[6] = img6[2]; tmpptr[7] = img7[2]; tmpptr += 8; tmpptr[0] = img0[3]; tmpptr[1] = img1[3]; tmpptr[2] = img2[3]; tmpptr[3] = img3[3]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #endif // __ARM_FEATURE_DOTPROD img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed 
char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img0[3]; tmpptr[5] = img1[3]; tmpptr[6] = img2[3]; tmpptr[7] = img3[3]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += size; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char* tmpptr = tmp.channel(i / 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) 
{ #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #endif // __ARM_FEATURE_DOTPROD img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += size; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 
+ i % 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr0 = 
top_blob.channel(p); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "cmp %w1, #0 \n" "beq 1f \n" "ld1 {v8.16b}, [%5], #16 \n" // _w0123_l "ld1 {v0.16b}, [%4], #16 \n" // _val0123_l "0: \n" "ld1 {v1.16b}, [%4], #16 \n" // _val4567_l "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v0.4b[2] \n" "sdot v19.4s, v8.16b, v0.4b[3] \n" "ld1 {v2.16b}, [%4], #16 \n" // _val891011_l "sdot v20.4s, v8.16b, v1.4b[0] \n" "sdot v21.4s, v8.16b, v1.4b[1] \n" "sdot v22.4s, v8.16b, v1.4b[2] \n" "sdot v23.4s, v8.16b, v1.4b[3] \n" "ld1 {v3.16b}, [%4], #16 \n" // _val12131415_l "sdot v24.4s, v8.16b, v2.4b[0] \n" "sdot v25.4s, v8.16b, v2.4b[1] \n" "ld1 {v9.16b}, [%5], #16 \n" // _w0123_h "sdot v26.4s, v8.16b, v2.4b[2] \n" "sdot v27.4s, v8.16b, v2.4b[3] \n" "ld1 {v4.16b}, [%4], #16 \n" // _val0123_h "sdot v28.4s, v8.16b, v3.4b[0] \n" "sdot v29.4s, v8.16b, v3.4b[1] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "ld1 {v5.16b}, [%4], #16 \n" // _val4567_h "sdot v16.4s, v9.16b, v4.4b[0] \n" "sdot v17.4s, v9.16b, v4.4b[1] \n" "sdot v18.4s, v9.16b, v4.4b[2] \n" "sdot v19.4s, v9.16b, v4.4b[3] \n" "ld1 {v6.16b}, [%4], #16 \n" // _val891011_h 
"sdot v20.4s, v9.16b, v5.4b[0] \n" "sdot v21.4s, v9.16b, v5.4b[1] \n" "sdot v22.4s, v9.16b, v5.4b[2] \n" "sdot v23.4s, v9.16b, v5.4b[3] \n" "ld1 {v7.16b}, [%4], #16 \n" // _val12131415_h "sdot v24.4s, v9.16b, v6.4b[0] \n" "sdot v25.4s, v9.16b, v6.4b[1] \n" "ld1 {v8.16b}, [%5], #16 \n" // _w0123_l "sdot v26.4s, v9.16b, v6.4b[2] \n" "sdot v27.4s, v9.16b, v6.4b[3] \n" "ld1 {v0.16b}, [%4], #16 \n" // _val0123_l "sdot v28.4s, v9.16b, v7.4b[0] \n" "sdot v29.4s, v9.16b, v7.4b[1] \n" "subs %w1, %w1, #1 \n" "sdot v30.4s, v9.16b, v7.4b[2] \n" "sdot v31.4s, v9.16b, v7.4b[3] \n" "bne 0b \n" "sub %4, %4, #16 \n" "sub %5, %5, #16 \n" "1: \n" "cmp %w2, #0 \n" "beq 3f \n" "2: \n" "ld1 {v8.16b}, [%5], #16 \n" "ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%4], #64 \n" "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v0.4b[2] \n" "sdot v19.4s, v8.16b, v0.4b[3] \n" "sdot v20.4s, v8.16b, v1.4b[0] \n" "sdot v21.4s, v8.16b, v1.4b[1] \n" "sdot v22.4s, v8.16b, v1.4b[2] \n" "sdot v23.4s, v8.16b, v1.4b[3] \n" "sdot v24.4s, v8.16b, v2.4b[0] \n" "sdot v25.4s, v8.16b, v2.4b[1] \n" "sdot v26.4s, v8.16b, v2.4b[2] \n" "sdot v27.4s, v8.16b, v2.4b[3] \n" "sdot v28.4s, v8.16b, v3.4b[0] \n" "sdot v29.4s, v8.16b, v3.4b[1] \n" "subs %w2, %w2, #1 \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "bne 2b \n" "3: \n" "lsr w4, %w3, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v8.8b, v9.8b}, [%5], #16 \n" "ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%4], #64 \n" "uzp1 v10.8b, v8.8b, v9.8b \n" "uzp2 v11.8b, v8.8b, v9.8b \n" "uzp1 v4.16b, v0.16b, v1.16b \n" "uzp2 v5.16b, v0.16b, v1.16b \n" "uzp1 v6.16b, v2.16b, v3.16b \n" "uzp2 v7.16b, v2.16b, v3.16b \n" "uzp1 v8.8b, v10.8b, v11.8b \n" "uzp2 v9.8b, v10.8b, v11.8b \n" "uzp1 v0.16b, v4.16b, v5.16b \n" // 0 1 4 5 "uzp2 v1.16b, v4.16b, v5.16b \n" // 8 9 c d "mov v8.d[1], v9.d[0] \n" // _w "uzp1 v2.16b, v6.16b, v7.16b \n" // 2 3 6 7 "uzp2 v3.16b, v6.16b, v7.16b \n" // a b e f "sdot v16.4s, 
v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v2.4b[0] \n" "sdot v19.4s, v8.16b, v2.4b[1] \n" "sdot v20.4s, v8.16b, v0.4b[2] \n" "sdot v21.4s, v8.16b, v0.4b[3] \n" "sdot v22.4s, v8.16b, v2.4b[2] \n" "sdot v23.4s, v8.16b, v2.4b[3] \n" "sdot v24.4s, v8.16b, v1.4b[0] \n" "sdot v25.4s, v8.16b, v1.4b[1] \n" "sdot v26.4s, v8.16b, v3.4b[0] \n" "sdot v27.4s, v8.16b, v3.4b[1] \n" "sdot v28.4s, v8.16b, v1.4b[2] \n" "sdot v29.4s, v8.16b, v1.4b[3] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" "and w4, %w3, #3 \n" // w4 = remain = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "ld1 {v1.8b}, [%5] \n" "ld1 {v0.16b}, [%4] \n" "sshll v1.8h, v1.8b, #0 \n" "sshll v2.8h, v0.8b, #0 \n" "sshll2 v3.8h, v0.16b, #0 \n" "smlal v16.4s, v1.4h, v2.h[0] \n" "smlal v17.4s, v1.4h, v2.h[1] \n" "smlal v18.4s, v1.4h, v2.h[2] \n" "smlal v19.4s, v1.4h, v2.h[3] \n" "smlal v20.4s, v1.4h, v2.h[4] \n" "smlal v21.4s, v1.4h, v2.h[5] \n" "smlal v22.4s, v1.4h, v2.h[6] \n" "smlal v23.4s, v1.4h, v2.h[7] \n" "smlal v24.4s, v1.4h, v3.h[0] \n" "smlal v25.4s, v1.4h, v3.h[1] \n" "smlal v26.4s, v1.4h, v3.h[2] \n" "smlal v27.4s, v1.4h, v3.h[3] \n" "smlal v28.4s, v1.4h, v3.h[4] \n" "smlal v29.4s, v1.4h, v3.h[5] \n" "smlal v30.4s, v1.4h, v3.h[6] \n" "smlal v31.4s, v1.4h, v3.h[7] \n" "add %4, %4, #16 \n" "add %5, %5, #4 \n" "subs w4, w4, #1 \n" "bne 6b \n" "7: \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), "=r"(nn), "=r"(nn4), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(nn4), "3"(nn1), "4"(tmpptr), "5"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", 
"v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } for (int j = 0; j < nn4; j++) { int8x16_t _val0123 = vld1q_s8(tmpptr); int8x16_t _val4567 = vld1q_s8(tmpptr + 16); int8x16_t _w0 = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0, _val0123, 0); _sum1 = 
vdotq_laneq_s32(_sum1, _w0, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0, _val0123, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0, _val4567, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0, _val4567, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0, _val4567, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0, _val4567, 3); tmpptr += 32; kptr0 += 16; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x8x4_t _val4 = vld4_s8(tmpptr); int8x8x2_t _val0145 = vuzp_s8(_val4.val[0], _val4.val[1]); int8x8x2_t _val2367 = vuzp_s8(_val4.val[2], _val4.val[3]); int8x16_t _val0123 = vcombine_s8(_val0145.val[0], _val2367.val[0]); int8x16_t _val4567 = vcombine_s8(_val0145.val[1], _val2367.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123f, _val4567, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123f, _val4567, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123f, _val4567, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123f, _val4567, 3); tmpptr += 32; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _val4 = vdup_n_s16(tmpptr[4]); int16x4_t _val5 = vdup_n_s16(tmpptr[5]); int16x4_t _val6 = vdup_n_s16(tmpptr[6]); int16x4_t _val7 = vdup_n_s16(tmpptr[7]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, 
_w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); _sum4 = vmlal_s16(_sum4, _val4, _w0123); _sum5 = vmlal_s16(_sum5, _val5, _w0123); _sum6 = vmlal_s16(_sum6, _val6, _w0123); _sum7 = vmlal_s16(_sum7, _val7, _w0123); tmpptr += 8; kptr0 += 4; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); vst1q_s32(outptr0 + 16, _sum4); vst1q_s32(outptr0 + 20, _sum5); vst1q_s32(outptr0 + 24, _sum6); vst1q_s32(outptr0 + 28, _sum7); outptr0 += 32; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } for (int j = 0; j < nn4; j++) { int8x16_t _val0123 = vld1q_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0, _val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0, _val0123, 2); _sum3 = 
vdotq_laneq_s32(_sum3, _w0, _val0123, 3); tmpptr += 16; kptr0 += 16; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x16_t _val = vld1q_s8(tmpptr); int8x8x2_t _val01 = vuzp_s8(vget_low_s8(_val), vget_high_s8(_val)); int8x8x2_t _val0123 = vuzp_s8(_val01.val[0], _val01.val[1]); int8x16_t _val0123f = vcombine_s8(_val0123.val[0], _val0123.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123f, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123f, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123f, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123f, 3); tmpptr += 16; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); tmpptr += 4; kptr0 += 4; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; #else // __ARM_FEATURE_DOTPROD asm volatile( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "cmp %w1, #0 \n" "beq 3f \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, 
v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #256] \n" "lsr w4, %w1, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%5, #512] \n" "add x5, %4, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%4] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%5], #64 \n" "add %4, %4, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%4] \n" "add %4, %4, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%5, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%5, #512] \n" "smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" "smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, v23.16b \n" 
"sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%4] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add %4, %4, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%4] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %4, %4, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%5], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%4, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%4, #384] \n" "sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %4, %4, #64 \n" "sub %5, %5, #64 \n" "1: \n" "and w4, %w1, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, v17.8b}, [%4], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%5], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" "smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%4], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" "smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, 
v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v1.4s \n" "addp v2.4s, v2.4s, v3.4s \n" "addp v4.4s, v4.4s, v5.4s \n" "addp v6.4s, v6.4s, v7.4s \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "addp v0.4s, v0.4s, v2.4s \n" "addp v1.4s, v4.4s, v6.4s \n" "addp v2.4s, v8.4s, v10.4s \n" "addp v3.4s, v12.4s, v14.4s \n" "3: \n" "cmp %w2, #0 \n" "beq 7f \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "lsr w4, %w2, #1 \n" // w4 = nn4 >> 1 "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v16.8b, v17.8b}, [%4], #16 \n" "ld1 {v22.8b, v23.8b}, [%5], #16 \n" "zip1 v18.2s, v16.2s, v16.2s \n" // _val00 "zip2 v19.2s, v16.2s, v16.2s \n" // _val11 "smull v24.8h, v18.8b, v22.8b \n" "smull v25.8h, v18.8b, v23.8b \n" "zip1 v20.2s, v17.2s, v17.2s \n" // _val22 "smull v26.8h, v19.8b, v22.8b \n" "smull v27.8h, v19.8b, v23.8b \n" "zip2 v21.2s, v17.2s, v17.2s \n" // _val33 "smull v28.8h, v20.8b, v22.8b \n" "smull v29.8h, v20.8b, v23.8b \n" "ld1 {v16.8b, v17.8b}, [%4], #16 \n" "smull v30.8h, v21.8b, v22.8b \n" "smull v31.8h, v21.8b, v23.8b \n" "ld1 {v22.8b, v23.8b}, [%5], #16 \n" "zip1 v18.2s, v16.2s, v16.2s \n" // _val44 "zip2 v19.2s, v16.2s, v16.2s \n" // _val55 "smlal v24.8h, v18.8b, v22.8b \n" "smlal v25.8h, v18.8b, v23.8b \n" "zip1 v20.2s, v17.2s, v17.2s \n" // _val66 "smlal v26.8h, v19.8b, v22.8b \n" "smlal v27.8h, v19.8b, v23.8b \n" "zip2 v21.2s, v17.2s, v17.2s \n" // _val77 "sadalp v8.4s, v24.8h \n" "smlal v28.8h, v20.8b, v22.8b \n" "sadalp v9.4s, v25.8h \n" "smlal v29.8h, v20.8b, v23.8b \n" "sadalp v10.4s, v26.8h \n" "smlal v30.8h, v21.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal v31.8h, v21.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" 
"subs w4, w4, #1 \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "bne 4b \n" "5: \n" "and w4, %w2, #1 \n" // w4 = remain = nn4 & 1 "cmp w4, #0 \n" // w4 > 0 "beq 6f \n" "ld1 {v16.8b, v17.8b}, [%4], #16 \n" "ld1 {v22.8b, v23.8b}, [%5], #16 \n" "zip1 v18.2s, v16.2s, v16.2s \n" // _val00 "zip2 v19.2s, v16.2s, v16.2s \n" // _val11 "smull v24.8h, v18.8b, v22.8b \n" "smull v25.8h, v18.8b, v23.8b \n" "zip1 v20.2s, v17.2s, v17.2s \n" // _val22 "smull v26.8h, v19.8b, v22.8b \n" "smull v27.8h, v19.8b, v23.8b \n" "zip2 v21.2s, v17.2s, v17.2s \n" // _val33 "sadalp v8.4s, v24.8h \n" "smull v28.8h, v20.8b, v22.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v20.8b, v23.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v21.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v21.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "6: \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "add v0.4s, v0.4s, v8.4s \n" "add v1.4s, v1.4s, v10.4s \n" "add v2.4s, v2.4s, v12.4s \n" "add v3.4s, v3.4s, v14.4s \n" "7: \n" "lsr w4, %w3, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 9f \n" "8: \n" "ld1 {v8.16b}, [%4], #16 \n" "ld1 {v9.16b}, [%5], #16 \n" "sshll v4.8h, v8.8b, #0 \n" "sshll2 v5.8h, v8.16b, #0 \n" "sshll v6.8h, v9.8b, #0 \n" "sshll2 v7.8h, v9.16b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "smlal2 v0.4s, v6.8h, v4.h[4] \n" "smlal2 v1.4s, v6.8h, v4.h[5] \n" "smlal2 v2.4s, v6.8h, v4.h[6] \n" "smlal2 v3.4s, v6.8h, v4.h[7] \n" "smlal v0.4s, v7.4h, v5.h[0] \n" "smlal v1.4s, v7.4h, v5.h[1] \n" "smlal v2.4s, v7.4h, v5.h[2] \n" "smlal v3.4s, v7.4h, v5.h[3] \n" "smlal2 v0.4s, v7.8h, v5.h[4] \n" "smlal2 v1.4s, v7.8h, v5.h[5] \n" "smlal2 v2.4s, v7.8h, v5.h[6] \n" "smlal2 v3.4s, v7.8h, v5.h[7] \n" "subs w4, w4, #1 \n" "bne 8b \n" "9: \n" 
"and w4, %w3, #3 \n" // w4 = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 11f \n" "10: \n" "ld1 {v4.8b}, [%4] \n" "ld1 {v6.8b}, [%5] \n" "sshll v4.8h, v4.8b, #0 \n" "sshll v6.8h, v6.8b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "subs w4, w4, #1 \n" "bne 10b \n" "11: \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(outptr0), "=r"(nn), "=r"(nn4), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(nn4), "3"(nn1), "4"(tmpptr), "5"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif // __ARM_FEATURE_DOTPROD } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; #if __aarch64__ int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum00 = vdotq_laneq_s32(_sum00, _w0123_l, _val01_l_h, 0); _sum10 = vdotq_laneq_s32(_sum10, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum00 = vdotq_laneq_s32(_sum00, _w0123_h, _val01_l_h, 2); _sum10 = vdotq_laneq_s32(_sum10, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } if (nn4 > 0) { int j = 0; for (; j + 1 < nn4; j += 2) { int8x16_t _val0123 = vld1q_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum00 = 
vdotq_laneq_s32(_sum00, _w0, _val0123, 0); _sum10 = vdotq_laneq_s32(_sum10, _w0, _val0123, 1); int8x16_t _w1 = vld1q_s8(kptr0 + 16); _sum00 = vdotq_laneq_s32(_sum00, _w1, _val0123, 2); _sum10 = vdotq_laneq_s32(_sum10, _w1, _val0123, 3); tmpptr += 16; kptr0 += 32; } for (; j < nn4; j++) { int8x8_t _val01 = vld1_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum00 = vdotq_lane_s32(_sum00, _w0, _val01, 0); _sum10 = vdotq_lane_s32(_sum10, _w0, _val01, 1); tmpptr += 8; kptr0 += 16; } } #else // __ARM_FEATURE_DOTPROD if (nn > 0) { int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), 
vget_low_s8(_w67)); _wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t _s101 = vpaddq_s32(_sum10, _sum11); int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); _sum00 = vpaddq_s32(_s001, _s023); _sum10 = vpaddq_s32(_s101, _s123); } if (nn4 > 0) { int32x4_t _sum100 = vdupq_n_s32(0); int32x4_t _sum101 = vdupq_n_s32(0); int32x4_t _sum110 = vdupq_n_s32(0); int32x4_t _sum111 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn4; j += 2) { int8x16_t _val0123 = vld1q_s8(tmpptr); int32x4x2_t _val00221133 = vzipq_s32(vreinterpretq_s32_s8(_val0123), vreinterpretq_s32_s8(_val0123)); 
int8x8_t _val00 = vreinterpret_s8_s32(vget_low_s32(_val00221133.val[0])); int8x8_t _val11 = vreinterpret_s8_s32(vget_high_s32(_val00221133.val[0])); int8x8_t _val22 = vreinterpret_s8_s32(vget_low_s32(_val00221133.val[1])); int8x8_t _val33 = vreinterpret_s8_s32(vget_high_s32(_val00221133.val[1])); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(_val00, vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(_val00, vget_high_s8(_w01)); int16x8_t _wv10 = vmull_s8(_val11, vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(_val11, vget_high_s8(_w01)); _wv00 = vmlal_s8(_wv00, _val22, vget_low_s8(_w23)); _wv01 = vmlal_s8(_wv01, _val22, vget_high_s8(_w23)); _wv10 = vmlal_s8(_wv10, _val33, vget_low_s8(_w23)); _wv11 = vmlal_s8(_wv11, _val33, vget_high_s8(_w23)); _sum100 = vpadalq_s16(_sum100, _wv00); _sum101 = vpadalq_s16(_sum101, _wv01); _sum110 = vpadalq_s16(_sum110, _wv10); _sum111 = vpadalq_s16(_sum111, _wv11); tmpptr += 16; kptr0 += 32; } for (; j < nn4; j++) { int8x8_t _val01 = vld1_s8(tmpptr); int32x2x2_t _val0011 = vzip_s32(vreinterpret_s32_s8(_val01), vreinterpret_s32_s8(_val01)); int8x8_t _val00 = vreinterpret_s8_s32(_val0011.val[0]); int8x8_t _val11 = vreinterpret_s8_s32(_val0011.val[1]); int8x16_t _w01 = vld1q_s8(kptr0); int16x8_t _wv00 = vmull_s8(_val00, vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(_val00, vget_high_s8(_w01)); int16x8_t _wv10 = vmull_s8(_val11, vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(_val11, vget_high_s8(_w01)); _sum100 = vpadalq_s16(_sum100, _wv00); _sum101 = vpadalq_s16(_sum101, _wv01); _sum110 = vpadalq_s16(_sum110, _wv10); _sum111 = vpadalq_s16(_sum111, _wv11); tmpptr += 8; kptr0 += 16; } int32x4_t _s001 = vpaddq_s32(_sum100, _sum101); int32x4_t _s101 = vpaddq_s32(_sum110, _sum111); _sum00 = vaddq_s32(_sum00, _s001); _sum10 = vaddq_s32(_sum10, _s101); } #endif // __ARM_FEATURE_DOTPROD int j = 0; for (; j + 3 < nn1; j += 4) { int16x8_t _val01234567 = vmovl_s8(vld1_s8(tmpptr)); int8x16_t _w = 
vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum00 = vmlal_laneq_s16(_sum00, _w0123, _val01234567, 0); _sum10 = vmlal_laneq_s16(_sum10, _w0123, _val01234567, 1); _sum00 = vmlal_laneq_s16(_sum00, _w4567, _val01234567, 2); _sum10 = vmlal_laneq_s16(_sum10, _w4567, _val01234567, 3); _sum00 = vmlal_laneq_s16(_sum00, _w89ab, _val01234567, 4); _sum10 = vmlal_laneq_s16(_sum10, _w89ab, _val01234567, 5); _sum00 = vmlal_laneq_s16(_sum00, _wcdef, _val01234567, 6); _sum10 = vmlal_laneq_s16(_sum10, _wcdef, _val01234567, 7); tmpptr += 8; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum00 = vmlal_s16(_sum00, _val0, _w0123); _sum10 = vmlal_s16(_sum10, _val1, _w0123); tmpptr += 2; kptr0 += 4; } vst1q_s32(outptr0, _sum00); vst1q_s32(outptr0 + 4, _sum10); outptr0 += 8; #else // __aarch64__ asm volatile( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "cmp %1, #0 \n" "beq 3f \n" "pld [%4, #256] \n" "lsr r4, %1, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %5, #16 \n" "pld [%5, #128] \n" "mov r6, #32 \n" "pld [%5, #384] \n" "vld1.s8 {d20-d21}, [%5 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%4 :128]! 
\n" // _val0 _val1 "vld1.s8 {d22-d23}, [%5 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%4, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%5, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%4 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%5 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%4 :128]! \n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%5 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%4, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%5, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %4, %4, #32 \n" "sub %5, %5, #64 \n" "1: \n" "and r4, %1, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%4 :128]! \n" // _val "vld1.s8 {d20-d21}, [%5 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%5 :128]! 
\n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "3: \n" "cmp %2, #0 \n" "beq 7f \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "lsr r4, %2, #1 \n" // r4 = nn4 >> 1 "cmp r4, #0 \n" "beq 5f \n" "4: \n" "vld1.s8 {d16-d17}, [%4]! \n" // _val0123 "vld1.s8 {d20-d23}, [%5]! \n" // _w01 _w23 "vmov.s8 q9, q8 \n" "vtrn.s32 q8, q9 \n" // _val00 _val22 _val11 _val33 "vmull.s8 q12, d16, d20 \n" "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d18, d20 \n" "vmull.s8 q15, d18, d21 \n" "vmlal.s8 q12, d17, d22 \n" "vmlal.s8 q13, d17, d23 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q4, q14 \n" "vpadal.s16 q5, q15 \n" "subs r4, r4, #1 \n" "bne 4b \n" "5: \n" "and r4, %2, #1 \n" // r4 = nn4 & 1 "cmp r4, #0 \n" // r4 > 0 "beq 6f \n" "vld1.s8 {d16}, [%4]! \n" // _val01 "vld1.s8 {d18-d19}, [%5]! 
\n" // _w01 "vmov.s8 d17, d16 \n" "vtrn.s32 d16, d17 \n" // _val00 _val11 "vmull.s8 q12, d16, d18 \n" "vmull.s8 q13, d16, d19 \n" "vmull.s8 q14, d17, d18 \n" "vmull.s8 q15, d17, d19 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q4, q14 \n" "vpadal.s16 q5, q15 \n" "6: \n" "vpadd.s32 d16, d4, d5 \n" "vpadd.s32 d17, d6, d7 \n" "vpadd.s32 d18, d8, d9 \n" "vpadd.s32 d19, d10, d11 \n" "vadd.s32 q0, q0, q8 \n" "vadd.s32 q1, q1, q9 \n" "7: \n" "lsr r4, %3, #2 \n" // r4 = nn1 >> 2 "cmp r4, #0 \n" "beq 9f \n" "8: \n" "vld1.s8 {d4}, [%4]! \n" "vmovl.s8 q2, d4 \n" "vld1.s8 {d10-d11}, [%5]! \n" "vmovl.s8 q3, d10 \n" "vmovl.s8 q4, d11 \n" "vmlal.s16 q0, d6, d4[0] \n" "vmlal.s16 q1, d6, d4[1] \n" "vmlal.s16 q0, d7, d4[2] \n" "vmlal.s16 q1, d7, d4[3] \n" "vmlal.s16 q0, d8, d5[0] \n" "vmlal.s16 q1, d8, d5[1] \n" "vmlal.s16 q0, d9, d5[2] \n" "vmlal.s16 q1, d9, d5[3] \n" "subs r4, r4, #1 \n" "bne 8b \n" "9: \n" "and r4, %3, #3 \n" // r4 = nn1 & 3 "cmp r4, #0 \n" // w4 > 0 "beq 11f \n" "10: \n" "vld1.s8 {d4[]}, [%4]! \n" "vld1.s8 {d6[]}, [%4]! \n" "vmovl.s8 q2, d4 \n" "vmovl.s8 q3, d6 \n" "vld1.s8 {d8}, [%5] \n" "vmovl.s8 q4, d8 \n" "vmlal.s16 q0, d4, d8 \n" "vmlal.s16 q1, d6, d8 \n" "add %5, %5, #4 \n" "subs r4, r4, #1 \n" "bne 10b \n" "11: \n" "vst1.s32 {d0-d3}, [%0 :128]! 
\n" : "=r"(outptr0), "=r"(nn), "=r"(nn4), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(nn4), "3"(nn1), "4"(tmpptr), "5"(kptr0) : "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } if (nn4 > 0) { int j = 0; for (; j + 1 < nn4; j += 2) { int8x8_t _val01 = vld1_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0, _val01, 0); int8x16_t _w1 = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w1, _val01, 1); tmpptr += 8; kptr0 += 32; } for (; j < nn4; j++) { int8x8_t _val_xxx = vld1_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0, _val_xxx, 0); tmpptr += 4; kptr0 += 16; } } #else // __ARM_FEATURE_DOTPROD if (nn > 0) { int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); 
int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45)); _wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); _sum0 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); _sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high)); #endif } if (nn4 > 0) { int32x4_t _sum10 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn4; j += 2) { int8x8_t _val01 = vld1_s8(tmpptr); int32x2x2_t _val0011 = vzip_s32(vreinterpret_s32_s8(_val01), vreinterpret_s32_s8(_val01)); int8x8_t _val00 = vreinterpret_s8_s32(_val0011.val[0]); int8x8_t _val11 = 
vreinterpret_s8_s32(_val0011.val[1]); int8x16_t _w0 = vld1q_s8(kptr0); int8x16_t _w1 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val00, vget_low_s8(_w0)); int16x8_t _wv1 = vmull_s8(_val00, vget_high_s8(_w0)); _wv0 = vmlal_s8(_wv0, _val11, vget_low_s8(_w1)); _wv1 = vmlal_s8(_wv1, _val11, vget_high_s8(_w1)); _sum10 = vpadalq_s16(_sum10, _wv0); _sum11 = vpadalq_s16(_sum11, _wv1); tmpptr += 8; kptr0 += 32; } for (; j < nn4; j++) { int8x8_t _val_xxx = vld1_s8(tmpptr); int8x8_t _val_val = vreinterpret_s8_s32(vzip_s32(vreinterpret_s32_s8(_val_xxx), vreinterpret_s32_s8(_val_xxx)).val[0]); int8x16_t _w0 = vld1q_s8(kptr0); int16x8_t _wv0 = vmull_s8(_val_val, vget_low_s8(_w0)); int16x8_t _wv1 = vmull_s8(_val_val, vget_high_s8(_w0)); _sum10 = vpadalq_s16(_sum10, _wv0); _sum11 = vpadalq_s16(_sum11, _wv1); tmpptr += 4; kptr0 += 16; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum10, _sum11); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum10), vget_high_s32(_sum10)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum11), vget_high_s32(_sum11)); int32x4_t _s01 = vcombine_s32(_s01_low, _s01_high); #endif _sum0 = vaddq_s32(_sum0, _s01); } #endif // __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 3 < nn1; j += 4) { int16x4_t _val0123 = vget_low_s16(vmovl_s8(vld1_s8(tmpptr))); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum0 = vmlal_lane_s16(_sum0, _w0123, _val0123, 0); _sum1 = vmlal_lane_s16(_sum1, _w4567, _val0123, 1); _sum0 = vmlal_lane_s16(_sum0, _w89ab, _val0123, 2); _sum1 = vmlal_lane_s16(_sum1, _wcdef, _val0123, 3); tmpptr += 4; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val = vdup_n_s16(tmpptr[0]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); 
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val, _w0123); tmpptr += 1; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_s32(outptr0, _sum0); outptr0 += 4; } } } static void convolution_im2col_sgemm_transform_kernel_pack1to4_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { #if NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD if (ncnn::cpu_support_arm_asimddp()) { extern void convolution_im2col_sgemm_transform_kernel_pack1to4_int8_neon_arm82dot(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h); convolution_im2col_sgemm_transform_kernel_pack1to4_int8_neon_arm82dot(_kernel, kernel_tm, inch, outch, kernel_w, kernel_h); return; } #endif const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 8a-4b-maxk-inch/8a-outch/4b // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82) Mat kernel = _kernel.reshape(maxk, inch, outch); if (inch >= 8) kernel_tm.create(32 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, outch / 4, (size_t)1u); if (inch >= 4) kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4, (size_t)1u); else kernel_tm.create(4 * maxk, inch, outch / 4, (size_t)1u); for (int q = 0; q + 3 < outch; q += 4) { signed char* g00 = kernel_tm.channel(q / 4); int p = 0; for (; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } for (int i = 0; i < 4; i++) { for (int j = 4; j < 8; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } #else for (int i = 0; i < 4; i++) { for (int j = 0; j < 8; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = 
k00[k]; g00++; } } #endif } } for (; p + 3 < inch; p += 4) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } } } for (; p < inch; p++) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 4; i++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p); g00[0] = k00[k]; g00++; } } } } } static void convolution_im2col_sgemm_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator); { const int gap = w * stride_h - outw * stride_w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; ptr[2] = sptr[stride_w * 2]; ptr[3] = sptr[stride_w * 3]; sptr += stride_w * 4; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; sptr += stride_w * 2; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += stride_w; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_pack1to4_int8_neon(bottom_im2col, top_blob, kernel, opt); }
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32));t3<=min(min(min(floord(4*t2+Ny,32),floord(Nt+Ny-4,32)),floord(2*t1+Ny+1,32)),floord(4*t1-4*t2+Nz+Ny-1,32));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(32*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(32*t3+Nx+28,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),32*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),32*t3+30),32*t4+30),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(32*t4,t5+1); ubv=min(32*t4+31,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ 
gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" #define ThrowWandException(severity,tag,context) \ { \ (void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \ tag,"`%s'",context); \ return(MagickFalse); \ } /* Typedef declarations. 
*/ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. % % The format of the MagickAverageImages method is: % % MagickWand *MagickAverageImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ static MagickWand *CloneMagickWandFromImages(const MagickWand *wand, Image *images) { MagickWand *clone_wand; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand)); if (clone_wand == (MagickWand *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", images->filename); (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand)); clone_wand->id=AcquireWandId(); (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g", MagickWandId,(double) clone_wand->id); clone_wand->exception=AcquireExceptionInfo(); InheritException(clone_wand->exception,wand->exception); clone_wand->image_info=CloneImageInfo(wand->image_info); clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info); clone_wand->images=images; clone_wand->debug=IsEventLogging(); if (clone_wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name); clone_wand->signature=WandSignature; return(clone_wand); } WandExport MagickWand *MagickAverageImages(MagickWand *wand) { Image *average_image; assert(wand != 
(MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); average_image=EvaluateImages(wand->images,MeanEvaluateOperator, wand->exception); if (average_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,average_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelView() makes a copy of the specified pixel view. % % The format of the ClonePixelView method is: % % PixelView *ClonePixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelView *ClonePixelView(const PixelView *pixel_view) { PixelView *clone_view; register ssize_t i; assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (PixelView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g", PixelViewId,(double) clone_view->id); clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,pixel_view->exception); clone_view->view=CloneCacheView(pixel_view->view); clone_view->region=pixel_view->region; clone_view->number_threads=pixel_view->number_threads; for (i=0; i < (ssize_t) pixel_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) 
pixel_view->pixel_wands[i],pixel_view->region.width); clone_view->debug=pixel_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelView() deallocates memory associated with a pixel view. % % The format of the DestroyPixelView method is: % % PixelView *DestroyPixelView(PixelView *pixel_view, % const size_t number_wands,const size_t number_threads) % % A description of each parameter follows: % % o pixel_view: the pixel view. % % o number_wand: the number of pixel wands. % % o number_threads: number of threads. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport PixelView *DestroyPixelView(PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands, pixel_view->region.width,pixel_view->number_threads); pixel_view->view=DestroyCacheView(pixel_view->view); pixel_view->exception=DestroyExceptionInfo(pixel_view->exception); pixel_view->signature=(~WandSignature); RelinquishWandId(pixel_view->id); pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view); return(pixel_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e 
r P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferPixelViewIterator() iterates over three pixel views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel region is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
%
*/
/* Iterate three pixel views (source, duplex, destination) row by row in
 * parallel, invoking the user transfer callback once per scanline.  Source
 * and duplex rows are read through virtual pixels (may lie outside the
 * canvas); destination rows are authentic and synced back after the
 * callback.  Returns MagickTrue unless any row fails. */
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag "PixelView/DuplexTransfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /* NOTE(review): only `source' is NULL/signature-asserted; `duplex' and
     `destination' are dereferenced unchecked -- confirm callers guarantee
     them. */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /* destination must be writable pixel-by-pixel */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /* NOTE(review): loop bound is region.height, not region.y+region.height --
     verify intended when region.y != 0 (matches the code as found). */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's wand row */

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    /* a failure in any thread aborts the remaining rows cheaply */
    if (status == MagickFalse)
      continue;
    /* load the source row into this thread's PixelWands */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* load the duplex row */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /* load the destination row (authentic: will be written back) */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /* user callback operates on the three wand rows */
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy the (possibly modified) destination wands back to the pixels */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress++,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w E x c e p t i o n                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a pixel view.
%
%  The format of the GetPixelViewException method is:
%
%      char *GetPixelViewException(const PixelWand *pixel_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel pixel_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  char
    *message;

  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=pixel_view->exception->severity;
  /*
    Room for the localized reason plus a parenthesized description.
  */
  message=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *message='\0';
  if (pixel_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(
      pixel_view->exception->severity,pixel_view->exception->reason),
      MaxTextExtent);
  if (pixel_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(message," (",MaxTextExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        pixel_view->exception->severity,pixel_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(message,")",MaxTextExtent);
    }
  return(message);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t   P i x e l   V i e w   H e i g h t                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewHeight() returns the pixel view height.
%
%  The format of the GetPixelViewHeight method is:
%
%      size_t GetPixelViewHeight(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
  /* Validate the view, then report the height of the region it spans. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.height);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t   P i x e l   V i e w   I t e r a t o r                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your get method for each scanline of the view.  The pixel region is not
%  confined to the image canvas--that is you can include negative offsets or
%  widths or heights that exceed the image dimension.  Any updates to the
%  pixels in your callback are ignored.
%
%  Use `#pragma omp critical` to define a section of code in your callback
%  get method that must be executed by a single thread at a time.
%
%  The format of the GetPixelViewIterator method is:
%
%      MagickBooleanType GetPixelViewIterator(PixelView *source,
%        GetPixelViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    /* Per-thread id selects a private row of pixel wands. */
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    /* Another scanline may already have failed; skip the work if so. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK images carry the black channel in the index queue. */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* PseudoClass images also expose the colormap index. */
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is not atomic; the critical section serializes it. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t   P i x e l   V i e w   P i x e l s                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewPixels() returns the pixel view pixel_wands.
%
%  The format of the GetPixelViewPixels method is:
%
%      PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  /* Each OpenMP thread owns a private row of pixel wands. */
  const int
    id = GetOpenMPThreadId();

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t   P i x e l   V i e w   W a n d                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWand() returns the magick wand associated with the pixel view.
%
%  The format of the GetPixelViewWand method is:
%
%      MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t   P i x e l   V i e w   W i d t h                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWidth() returns the pixel view width.
%
%  The format of the GetPixelViewWidth method is:
%
%      size_t GetPixelViewWidth(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
% */ WandExport size_t GetPixelViewWidth(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w X % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewX() returns the pixel view x offset. % % The format of the GetPixelViewX method is: % % ssize_t GetPixelViewX(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewX(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.x); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w Y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewY() returns the pixel view y offset. % % The format of the GetPixelViewY method is: % % ssize_t GetPixelViewY(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewY(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.y); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPixelView() returns MagickTrue if the the parameter is verified as a pixel % view container. % % The format of the IsPixelView method is: % % MagickBooleanType IsPixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view) { size_t length; if (pixel_view == (const PixelView *) NULL) return(MagickFalse); if (pixel_view->signature != WandSignature) return(MagickFalse); length=strlen(PixelViewId); if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C l i p P a t h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickClipPathImage() clips along the named paths from the 8BIM profile, if % present. Later operations take effect inside the path. Id may be a number % if preceded with #, to work on a numbered path, e.g., "#1" to use the first % path. % % The format of the MagickClipPathImage method is: % % MagickBooleanType MagickClipPathImage(MagickWand *wand, % const char *pathname,const MagickBooleanType inside) % % A description of each parameter follows: % % o wand: the magick wand. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % */ WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand, const char *pathname,const MagickBooleanType inside) { return(MagickClipImagePath(wand,pathname,inside)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetFillAlpha() returns the alpha used when drawing using the fill % color or fill texture. Fully opaque is 1.0. 
% % The format of the DrawGetFillAlpha method is: % % double DrawGetFillAlpha(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport double DrawGetFillAlpha(const DrawingWand *wand) { return(DrawGetFillOpacity(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetStrokeAlpha() returns the alpha of stroked object outlines. % % The format of the DrawGetStrokeAlpha method is: % % double DrawGetStrokeAlpha(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. */ WandExport double DrawGetStrokeAlpha(const DrawingWand *wand) { return(DrawGetStrokeOpacity(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P e e k G r a p h i c W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPeekGraphicWand() returns the current drawing wand. % % The format of the PeekDrawingWand method is: % % DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand) { return(PeekDrawingWand(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P o p G r a p h i c C o n t e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPopGraphicContext() destroys the current drawing wand and returns to the % previously pushed drawing wand. Multiple drawing wands may exist. It is an % error to attempt to pop more drawing wands than have been pushed, and it is % proper form to pop all drawing wands which have been pushed. 
% % The format of the DrawPopGraphicContext method is: % % MagickBooleanType DrawPopGraphicContext(DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport void DrawPopGraphicContext(DrawingWand *wand) { (void) PopDrawingWand(wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P u s h G r a p h i c C o n t e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPushGraphicContext() clones the current drawing wand to create a new % drawing wand. The original drawing wand(s) may be returned to by % invoking PopDrawingWand(). The drawing wands are stored on a drawing wand % stack. For every Pop there must have already been an equivalent Push. % % The format of the DrawPushGraphicContext method is: % % MagickBooleanType DrawPushGraphicContext(DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport void DrawPushGraphicContext(DrawingWand *wand) { (void) PushDrawingWand(wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetFillAlpha() sets the alpha to use when drawing using the fill % color or fill texture. Fully opaque is 1.0. % % The format of the DrawSetFillAlpha method is: % % void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. 
% % o fill_alpha: fill alpha % */ WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) { DrawSetFillOpacity(wand,fill_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetStrokeAlpha() specifies the alpha of stroked object outlines. % % The format of the DrawSetStrokeAlpha method is: % % void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. % % o stroke_alpha: stroke alpha. The value 1.0 is opaque. % */ WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) { DrawSetStrokeOpacity(wand,stroke_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o l o r F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickColorFloodfillImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % The format of the MagickColorFloodfillImage method is: % % MagickBooleanType MagickColorFloodfillImage(MagickWand *wand, % const PixelWand *fill,const double fuzz,const PixelWand *bordercolor, % const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o wand: the magick wand. % % o fill: the floodfill color pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. 
For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now
%      interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    Seed the target with the pixel at (x,y); coordinates wrap via modulo.
    NOTE(review): x,y are signed and columns/rows unsigned -- negative
    coordinates rely on the implementation-defined mixed-sign modulo.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  /* A border color, when given, replaces the seed as the match target. */
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k   D e s c r i b e   I m a g e                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickDescribeImage() identifies an image by printing its attributes to
%  the file.  Attributes include the image width, height, size, and others.
%
%  The format of the MagickDescribeImage method is:
%
%      const char *MagickDescribeImage(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated alias; forwards to MagickIdentifyImage(). */
  return(MagickIdentifyImage(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k   F l a t t e n   I m a g e s                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickFlattenImages() merges a sequence of images.  This useful for
%  combining Photoshop layers into a single image.
%
%  The format of the MagickFlattenImages method is:
%
%      MagickWand *MagickFlattenImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *flatten_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* An empty wand flattens to nothing. */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  flatten_image=FlattenImages(wand->images,wand->exception);
  if (flatten_image == (Image *) NULL)
    return((MagickWand *) NULL);
  /* Wrap the merged image in a fresh wand; caller owns the result. */
  return(CloneMagickWandFromImages(wand,flatten_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k   G e t   I m a g e   A t t r i b u t e                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageAttribute() returns a value associated with the specified
%  property.  Use MagickRelinquishMemory() to free the value when you are
%  finished with it.
%
%  The format of the MagickGetImageAttribute method is:
%
%      char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated alias; forwards to MagickGetImageProperty(). */
  return(MagickGetImageProperty(wand,property));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   M a g i c k   G e t   I m a g e   I n d e x                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageIndex() returns the index of the current image.
%
%  The format of the MagickGetImageIndex method is:
%
%      ssize_t MagickGetImageIndex(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated alias; forwards to MagickGetIteratorIndex(). */
  return(MagickGetIteratorIndex(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   M a g i c k   G e t   I m a g e   C h a n n e l   E x t r e m a           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageChannelExtrema() gets the extrema for one or more image
%  channels.
%
%  The format of the MagickGetImageChannelExtrema method is:
%
%      MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
%        const ChannelType channel,size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the image channel(s).
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Delegate to MagickCore; minima/maxima are filled on success. */
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   M a g i c k   G e t   I m a g e   E x t r e m a                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageExtrema() gets the extrema for the image.
%
%  The format of the MagickGetImageExtrema method is:
%
%      MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
%        size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
% */ WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand, size_t *minima,size_t *maxima) { MagickBooleanType status; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); status=GetImageExtrema(wand->images,minima,maxima,wand->exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e M a t t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageMatte() returns MagickTrue if the image has a matte channel % otherwise MagickFalse. % % The format of the MagickGetImageMatte method is: % % size_t MagickGetImageMatte(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand) { assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); return(wand->images->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImagePixels() extracts pixel data from an image and returns it to % you. The method returns MagickTrue on success otherwise MagickFalse if an % error is encountered. The data is returned as char, short int, int, ssize_t, % float, or double in the order specified by map. 
% % Suppose you want to extract the first scanline of a 640x480 image as % character data in red-green-blue order: % % MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickGetImagePixels method is: % % MagickBooleanType MagickGetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter % of a region of pixels you want to extract. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel, % LongPixel, QuantumPixel, or ShortPixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, void *pixels) { return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageSize() returns the image length in bytes. 
%
%  The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Blob size of the current image, in bytes. */
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k   M a p   I m a g e                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Both the target and the reference wand must hold at least one image. */
  if ((wand->images == (Image *) NULL) ||
      (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k   M a t t e   F l o o d f i l l   I m a g e                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel
%  that matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for
%  any neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is
%      fully transparent.
%
%    o fuzz: By default target must match a particular pixel color exactly.
%      However, in many cases two colors may differ by a small amount.  The
%      fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now
%      interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Seed the target with the pixel at (x,y); coordinates wrap via modulo.
    (The unused DrawInfo clone/destroy pair of the old implementation has
    been removed; MatteFloodfillImage() never consulted it.)
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  /* A border color, when given, replaces the seed as the match target. */
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* alpha=1.0 (opaque) maps to opacity 0; alpha=0.0 to QuantumRange. */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k   M e d i a n   F i l t e r   I m a g e                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the
%  quality of a noisy image.  Each pixel is replaced by the median in a set
%  of neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */ WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand, const double radius) { Image *median_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); median_image=MedianFilterImage(wand->images,radius,wand->exception); if (median_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,median_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M i n i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMinimumImages() returns the minimum intensity of an image sequence. % % The format of the MagickMinimumImages method is: % % MagickWand *MagickMinimumImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickMinimumImages(MagickWand *wand) { Image *minimum_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); minimum_image=EvaluateImages(wand->images,MinEvaluateOperator, wand->exception); if (minimum_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,minimum_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickModeImage() makes each pixel the 'predominant color' of the % neighborhood of the specified radius. 
%
%  The format of the MagickModeImage method is:
%
%      MagickBooleanType MagickModeImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *new_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Apply the mode filter; on success the result replaces the current image. */
  new_image=ModeImage(wand->images,radius,wand->exception);
  if (new_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,new_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M o s a i c I m a g e s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMosaicImages() inlays an image sequence to form a single coherent
%  picture.  It returns a wand with each image in the sequence composited at
%  the location defined by the page offset of the image.
%
%  The format of the MagickMosaicImages method is:
%
%      MagickWand *MagickMosaicImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *composite_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  /* Composite every image at its page offset into one coherent picture. */
  composite_image=MosaicImages(wand->images,wand->exception);
  return(composite_image == (Image *) NULL ? (MagickWand *) NULL :
    CloneMagickWandFromImages(wand,composite_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k O p a q u e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forwards directly to MagickPaintOpaqueImage(). */
  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintFloodfillImage() changes the color value of any pixel that matches
%  target and is an immediate neighbor.  If the method FillToBorderMethod is
%  specified, the color value is changed for any neighbor pixel that does not
%  match the bordercolor member of image.
%
%  The format of the MagickPaintFloodfillImage method is:
%
%      MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
%        const ChannelType channel,const PixelWand *fill,const double fuzz,
%        const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated alias: delegates to MagickFloodfillPaintImage() with
    invert=MagickFalse (paint matching pixels, not non-matching ones).
  */
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* Convenience form: operate on the default channel set. */
  return(MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz));
}

WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /*
    Deprecated alias: delegates to MagickOpaquePaintImageChannel() with
    invert=MagickFalse (replace matching pixels, not non-matching ones).
  */
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forwards with invert=MagickFalse. */
  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e c o l o r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRecolorImage() apply color transformation to an image.  The method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be used,
%  typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).  The matrix is similar to those used by Adobe Flash
%  except offsets are in column 6 rather than 5 (in support of CMYKA images)
%  and offsets are normalized (divide Flash offset by 255).
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *recolor_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* A NULL matrix is a caller error, not an exception condition. */
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  recolor_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (recolor_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,recolor_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.  Use
%  a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */ WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand, const double radius) { Image *noise_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); noise_image=ReduceNoiseImage(wand->images,radius,wand->exception); if (noise_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,noise_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M a x i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMaximumImages() returns the maximum intensity of an image sequence. % % The format of the MagickMaximumImages method is: % % MagickWand *MagickMaximumImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickMaximumImages(MagickWand *wand) { Image *maximum_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator, wand->exception); if (maximum_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,maximum_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageAttribute() associates a property with an image. 
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    define[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Build the "format:key=value" define string and register it. */
  (void) FormatLocaleString(define,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,define));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forwards to MagickPaintTransparentImage(). */
  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns it
%  as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  /* Deprecated alias: forwards to MagickGetImageRegion(). */
  return(MagickGetImageRegion(wand,width,height,x,y));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k S e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at the
%  location you specify.  The method returns MagickTrue on success otherwise
%  MagickFalse if an error is encountered.  The pixel data can be either char,
%  short int, int, ssize_t, float, or double in the order specified by map.
% % Suppose your want to upload the first scanline of a 640x480 image from % character data in red-green-blue order: % % MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickSetImagePixels method is: % % MagickBooleanType MagickSetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % const void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter of a region % of pixels you want to define. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, % or DoublePixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, const void *pixels) { return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k W r i t e I m a g e B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickWriteImageBlob() implements direct to memory image formats. 
It
%  returns the image as a blob and its length.   Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  unsigned char
    *blob;

  /* Deprecated alias: forwards to MagickGetImageBlob(). */
  blob=MagickGetImageBlob(wand,length);
  return(blob);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  Allocate one array of `number_wands` pixel wands per thread.  On any
  failure the partially-built set is torn down via DestroyPixelsThreadSet()
  (defined elsewhere in this file) and NULL is returned, so the caller sees
  all-or-nothing.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero first so DestroyPixelsThreadSet() can free a partial set safely. */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

/*
  Construct a PixelView spanning the wand's entire current image.  Field
  initialization order matters: `wand` must be set before the cache view is
  acquired (the view reads pixel_view->wand->images).
*/
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  /*
    NOTE(review): this assert uses MagickSignature while the rest of this
    file checks WandSignature -- confirm both constants are equivalent here.
  */
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  pixel_view->wand=wand;
  /* Virtual (read-only) cache view over the wand's current image. */
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  /* One row of pixel wands per OpenMP thread. */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: assign the wand member BEFORE acquiring the cache view.  The
    struct was just zeroed, so the previous order dereferenced a NULL
    pixel_view->wand inside AcquireVirtualCacheView() (compare the correct
    ordering in NewPixelView()).
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  /* One row of pixel wands per OpenMP thread. */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PixelGetNextRow() returns the next row as an array of pixel wands from the % pixel iterator. % % The format of the PixelGetNextRow method is: % % PixelWand **PixelGetNextRow(PixelIterator *iterator, % size_t *number_wands) % % A description of each parameter follows: % % o iterator: the pixel iterator. % % o number_wands: the number of pixel wands. % */ WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator) { size_t number_wands; return(PixelGetNextIteratorRow(iterator,&number_wands)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i x e l I t e r a t o r G e t E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PixelIteratorGetException() returns the severity, reason, and description of % any error that occurs when using other methods in this API. % % The format of the PixelIteratorGetException method is: % % char *PixelIteratorGetException(const Pixeliterator *iterator, % ExceptionType *severity) % % A description of each parameter follows: % % o iterator: the pixel iterator. % % o severity: the severity of the error is returned here. % */ WandExport char *PixelIteratorGetException(const PixelIterator *iterator, ExceptionType *severity) { return(PixelGetIteratorException(iterator,severity)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelViewIterator() iterates over the pixel view in parallel and calls % your set method for each scanline of the view. The pixel region is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. 
The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Pixels are written directly, so the image must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* A failure on any row aborts the remaining rows (cannot break in OMP). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Invoke the user callback to fill this thread's row of pixel wands. */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand values back into the authentic pixel row. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
% % The format of the TransferPixelViewIterator method is: % % MagickBooleanType TransferPixelViewIterator(PixelView *source, % PixelView *destination,TransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source, PixelView *destination,TransferPixelViewMethod transfer,void *context) { #define TransferPixelViewTag "PixelView/Transfer" ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict pixels; register IndexPacket *restrict destination_indexes; register ssize_t x; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if 
(source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (transfer(source,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_TransferPixelViewIterator) #endif proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++, 
source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdatePixelViewIterator() iterates over the pixel view in parallel and calls % your update method for each scanline of the view. The pixel region is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdatePixelViewIterator method is: % % MagickBooleanType UpdatePixelViewIterator(PixelView *source, % UpdatePixelViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o update: the update callback method. % % o context: the user defined context. 
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Updates are written back, so per-pixel (DirectClass) storage is needed. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* Another scanline already failed; skip the remaining work. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Load this thread's wand row with the current pixel values... */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* ...let the user callback modify them... */
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* ...and sync the (possibly modified) wand values back to the cache. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
6811.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for for (t4 = 1; t4 <= nx - 1; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 1 ? t4 + 31 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 1; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 1 ? t4 + 31 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 2 ? t4 + 31 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 16) for (t10 = t8; t10 <= (ny - 2 < t8 + 15 ? ny - 2 : t8 + 15); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
absval_hcl_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: renzun@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>
#include <arm_neon.h>

/* No per-node state is needed for absval; all hooks are no-ops. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Elementwise absolute value, NEON-vectorized 4 floats at a time with a
 * scalar tail.  Layout is NCHW: dims = [batch, channel, h, w]. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    float* idata = ( float* )input_tensor->data;
    float* odata = ( float* )output_tensor->data;

    int channel_num = input_tensor->dims[1];
    int batch_number = input_tensor->dims[0];
    int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
    int num_thread = exec_graph->num_thread;

    /* BUGFIX: the original advanced the shared idata/odata pointers inside
     * the "omp parallel for" body -- a data race producing wrong results
     * whenever num_thread > 1.  Each iteration now derives its own base
     * pointers from the channel index c. */
#pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channel_num * batch_number; c++)
    {
        const float* in = idata + ( size_t )c * ( size_t )channel_size;
        float* out = odata + ( size_t )c * ( size_t )channel_size;

        int i = 0;
        /* vectorized body: channel_size & -4 rounds down to a multiple of 4 */
        for (; i < (channel_size & -4); i += 4)
        {
            float32x4_t _p = vld1q_f32(in + i);
            _p = vabsq_f32(_p);
            vst1q_f32(out + i, _p);
        }
        /* scalar tail: remaining 0..3 elements */
        for (; i < channel_size; i++)
        {
            out[i] = (in[i] < 0) ? -in[i] : in[i];
        }
    }

    return 0;
}

/* Claim this implementation only for FP32 NCHW tensors. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    struct ir_node* ir_node = exec_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    if (input_tensor->data_type != TENGINE_DT_FP32 || input_tensor->layout != TENGINE_LAYOUT_NCHW)
        return 0;

    return OPS_SCORE_BEST;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_absval_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_ABSVAL, &hcl_node_ops);
}

static int unreg_absval_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_ABSVAL, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_absval_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_absval_hcl_ops);
GB_dense_ewise3_accum_template.c
//------------------------------------------------------------------------------ // GB_dense_ewise3_accum_template: C += A+B where all 3 matrices are dense //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // No matrix is iso. { //-------------------------------------------------------------------------- // get A, B, and C //-------------------------------------------------------------------------- // any matrix may be aliased to any other (C==A, C==B, and/or A==B) GB_ATYPE *Ax = (GB_ATYPE *) A->x ; GB_BTYPE *Bx = (GB_BTYPE *) B->x ; GB_CTYPE *Cx = (GB_CTYPE *) C->x ; const int64_t cnz = GB_nnz (C) ; ASSERT (!C->iso) ; ASSERT (!A->iso) ; ASSERT (!B->iso) ; int64_t p ; //-------------------------------------------------------------------------- // C += A+B where all 3 matries are dense //-------------------------------------------------------------------------- if (A == B) { //---------------------------------------------------------------------- // C += A+A where A and C are dense //---------------------------------------------------------------------- // C += A+A #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < cnz ; p++) { GB_GETA (aij, Ax, p, false) ; // aij = Ax [p] GB_CTYPE_SCALAR (t) ; // declare scalar t GB_BINOP (t, aij, aij, 0, 0) ; // t = aij + aij GB_BINOP (GB_CX (p), GB_CX (p), t, 0, 0) ; // Cx [p] = cij + t } } else { //---------------------------------------------------------------------- // C += A+B where all 3 matrices are dense //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < cnz ; p++) { GB_GETA (aij, Ax, p, false) ; // aij = Ax [p] GB_GETB (bij, Bx, p, false) ; // bij = Bx [p] 
GB_CTYPE_SCALAR (t) ; // declare scalar t GB_BINOP (t, aij, bij, 0, 0) ; // t = aij + bij GB_BINOP (GB_CX (p), GB_CX (p), t, 0, 0) ; // Cx [p] = cij + t } } }
test_init_mt.c
/** * Copyright (C) Mellanox Technologies Ltd. 2020. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <ucp/api/ucp.h> #if _OPENMP #include <omp.h> #endif int main(int argc, char **argv) { int count = 0; #pragma omp parallel { ucs_status_t ctx_status, worker_status; ucp_context_h context; ucp_worker_h worker; ucp_params_t params; ucp_worker_params_t wparams; params.field_mask = UCP_PARAM_FIELD_FEATURES; params.features = UCP_FEATURE_TAG | UCP_FEATURE_STREAM; ctx_status = ucp_init(&params, NULL, &context); if (ctx_status == UCS_OK) { wparams.field_mask = 0; worker_status = ucp_worker_create(context, &wparams, &worker); if (worker_status == UCS_OK) { __sync_add_and_fetch(&count, 1); } } #pragma omp barrier if (ctx_status == UCS_OK) { if (worker_status == UCS_OK) { ucp_worker_destroy(worker); } ucp_cleanup(context); } } #pragma omp barrier printf("finished %d threads\n", count); return 0; }
GB_subassign_11.c
//------------------------------------------------------------------------------
// GB_subassign_11: C(I,J)<M,repl> += scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 11: C(I,J)<M,repl> += scalar ; using S

// M:           present
// Mask_comp:   false
// C_replace:   true
// accum:       present
// A:           scalar
// S:           constructed

// C, M: not bitmap

// NOTE(review): control flow and locals (S, taskid, ntasks, nzombies, pS,
// pM, task_pending, ...) are largely introduced by the GB_* macros from
// GB_subassign_methods.h; read this file together with that header.

#include "GB_unused.h"
#include "GB_subassign_methods.h"

GrB_Info GB_subassign_11
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_BinaryOp accum,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;

    GB_GET_C ;      // C must not be bitmap
    GB_GET_MASK ;
    GB_GET_ACCUM_SCALAR ;
    GB_GET_S ;

    //--------------------------------------------------------------------------
    // Method 11: C(I,J)<M,repl> += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal.  All entries in M+S must be examined.  All entries in S
    // are modified: if M(i,j)=1 then S(i,j) is used to write to the
    // corresponding entry in C.  If M(i,j) is not present, or zero, then the
    // entry in C is cleared (because of C_replace).  If S(i,j) is not present,
    // and M(i,j)=1, then the scalar is inserted into C.  The only case that
    // can be skipped is if neither S nor M is present.  As a result, this
    // method need not traverse all of IxJ.  It can limit its traversal to the
    // pattern of M+S.

    // Method 09 and Method 11 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    if (M_is_bitmap)
    {
        // all of IxJ must be examined
        GB_SUBASSIGN_IXJ_SLICE ;
    }
    else
    {
        // traverse all M+S
        GB_SUBASSIGN_TWO_SLICE (M, S) ;
    }

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    if (M_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase1: M is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iM_start, iM_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iM_start:iM_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
                int64_t pM_start = j * Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
                //--------------------------------------------------------------

                for (int64_t iM = iM_start ; iM < iM_end ; iM++)
                {
                    int64_t pM = pM_start + iM ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
                    bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
                    if (Sfound && !mij)
                    {
                        // S (i,j) is present but M (i,j) is false
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (!Sfound && mij)
                    {
                        // S (i,j) is not present, M (i,j) is true
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    else if (Sfound && mij)
                    {
                        // S (i,j) present and M (i,j) is true
                        GB_C_S_LOOKUP ;
                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =C+A ): apply accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_withaccum_C_A_1_scalar ;
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase1: M is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE1 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get S(:,j) and M(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and M(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and M (:,j) have entries
                while (pS < pS_end && pM < pM_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iM = GBI (Mi, pM, Mvlen) ;

                    if (iS < iM)
                    {
                        // S (i,j) is present but M (i,j) is not
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (iM < iS)
                    {
                        // S (i,j) is not present, M (i,j) is present
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            task_pending++ ;
                        }
                        GB_NEXT (M) ;
                    }
                    else
                    {
                        // both S (i,j) and M (i,j) present
                        GB_C_S_LOOKUP ;
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_withaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): now zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                        GB_NEXT (M) ;
                    }
                }

                // while list S (:,j) has entries.  List M (:,j) exhausted.
                while (pS < pS_end)
                {
                    // S (i,j) is present but M (i,j) is not
                    // ----[C A 0] or [X A 0]-----------------------------------
                    // [X A 0]: action: ( X ): still a zombie
                    // [C A 0]: C_repl: action: ( delete ): becomes zombie
                    GB_C_S_LOOKUP ;
                    GB_DELETE_ENTRY ;
                    GB_NEXT (S) ;
                }

                // while list M (:,j) has entries.  List S (:,j) exhausted.
                while (pM < pM_end)
                {
                    // S (i,j) is not present, M (i,j) is present
                    if (GB_mcast (Mx, pM, msize))
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    GB_NEXT (M) ;
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    if (M_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase2: M is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iM_start, iM_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iM_start:iM_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
                int64_t pM_start = j * Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                for (int64_t iM = iM_start ; iM < iM_end ; iM++)
                {
                    int64_t pM = pM_start + iM ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
                    bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
                    if (!Sfound && mij)
                    {
                        // S (i,j) is not present, M (i,j) is true
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    else if (Sfound)
                    {
                        // S (i,j) present
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase2: M is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE2 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get S(:,j) and M(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and M(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and M (:,j) have entries
                while (pS < pS_end && pM < pM_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iM = GBI (Mi, pM, Mvlen) ;

                    if (iS < iM)
                    {
                        // S (i,j) is present but M (i,j) is not
                        GB_NEXT (S) ;
                    }
                    else if (iM < iS)
                    {
                        // S (i,j) is not present, M (i,j) is present
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                        GB_NEXT (M) ;
                    }
                    else
                    {
                        // both S (i,j) and M (i,j) present
                        GB_NEXT (S) ;
                        GB_NEXT (M) ;
                    }
                }

                // while list M (:,j) has entries.  List S (:,j) exhausted.
                while (pM < pM_end)
                {
                    // S (i,j) is not present, M (i,j) is present
                    if (GB_mcast (Mx, pM, msize))
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iM = GBI (Mi, pM, Mvlen) ;
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    GB_NEXT (M) ;
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
9035.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp parallel for schedule(static, 1) simd for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp parallel for schedule(static, 1) simd for (i = 0; i < _PB_N; i++) { #pragma omp target teams distribute thread_limit(128) schedule(static, 1) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp parallel for schedule(static, 1) simd for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp target teams distribute thread_limit(128) schedule(static, 1) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
nr_direct.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License"); you may not
   use this file except in compliance with the License. You may obtain a copy
   of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless
   required by applicable law or agreed to in writing, software distributed
   under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
   CONDITIONS OF ANY KIND, either express or implied. See the License for the
   specific language governing permissions and limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "optimizer.h"
#include "nr_direct.h"

/* Largest AO dimension among the shells selected by shls_slice. */
int GTOmax_shell_dim(const int *ao_loc, const int *shls_slice, int ncenter);
/* Scratch size (doubles) the integral engine needs for the given shells. */
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env);

/* Common local declarations shared by every CVHFdot_* kernel: shell/AO
 * offsets for the (ish,jsh) bra pair, scratch-cache pointer placed after
 * the integral buffer, and the prescreening callback (falls back to
 * CVHFnoscreen when no optimizer is supplied). */
#define DECLARE_ALL \
        const int *atm = envs->atm; \
        const int *bas = envs->bas; \
        const double *env = envs->env; \
        const int natm = envs->natm; \
        const int nbas = envs->nbas; \
        const int *ao_loc = envs->ao_loc; \
        const int *shls_slice = envs->shls_slice; \
        const CINTOpt *cintopt = envs->cintopt; \
        const int ioff = ao_loc[shls_slice[0]]; \
        const int joff = ao_loc[shls_slice[2]]; \
        const int koff = ao_loc[shls_slice[4]]; \
        const int loff = ao_loc[shls_slice[6]]; \
        const int i0 = ao_loc[ish] - ioff; \
        const int j0 = ao_loc[jsh] - joff; \
        const int i1 = ao_loc[ish+1] - ioff; \
        const int j1 = ao_loc[jsh+1] - joff; \
        const int di = i1 - i0; \
        const int dj = j1 - j0; \
        const int ncomp = envs->ncomp; \
        const int dk = GTOmax_shell_dim(ao_loc, shls_slice+4, 2); \
        double *cache = buf + di * dj * dk * dk * ncomp; \
        int shls[4]; \
        void (*pf)(double *eri, double *dm, JKArray *vjk, int *shls, \
                   int i0, int i1, int j0, int j1, \
                   int k0, int k1, int l0, int l1); \
        int (*fprescreen)(); \
        if (vhfopt) { \
                fprescreen = vhfopt->fprescreen; \
        } else { \
                fprescreen = CVHFnoscreen; \
        } \
        int ksh, lsh, k0, k1, l0, l1, idm;

/* Compute the (ij|kl) shell-quartet integrals (after prescreening) and
 * contract them against every density matrix via its JKOperator. */
#define INTOR_AND_CONTRACT \
        if ((*fprescreen)(shls, vhfopt, atm, bas, env) \
            && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, \
                        cintopt, cache)) { \
                k0 = ao_loc[ksh] - koff; \
                l0 = ao_loc[lsh] - loff; \
                k1 = ao_loc[ksh+1] - koff; \
                l1 = ao_loc[lsh+1] - loff; \
                for (idm = 0; idm < n_dm; idm++) { \
                        pf = jkop[idm]->contract; \
                        (*pf)(buf, dms[idm], vjk[idm], shls, \
                              i0, i1, j0, j1, k0, k1, l0, l1); \
                } \
        }

/*
 * for given ksh, lsh, loop all ish, jsh
 * (no permutational symmetry assumed: the full ksh x lsh rectangle is visited)
 */
void CVHFdot_nrs1(int (*intor)(), JKOperator **jkop, JKArray **vjk,
                  double **dms, double *buf, int n_dm, int ish, int jsh,
                  CVHFOpt *vhfopt, IntorEnvs *envs)
{
        DECLARE_ALL;
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int lsh0 = shls_slice[6];
        const int lsh1 = shls_slice[7];

        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
        for (lsh = lsh0; lsh < lsh1; lsh++) {
                shls[2] = ksh;
                shls[3] = lsh;
                INTOR_AND_CONTRACT;
        } }
}

/*
 * for given ish, jsh, loop all ksh > lsh
 * (triangular kl loop exploiting (kl) permutational symmetry)
 */
static void dot_nrs2sub(int (*intor)(), JKOperator **jkop, JKArray **vjk,
                        double **dms, double *buf, int n_dm, int ish, int jsh,
                        CVHFOpt *vhfopt, IntorEnvs *envs)
{
        DECLARE_ALL;
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int lsh0 = shls_slice[6];

        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
        for (lsh = lsh0; lsh <= ksh; lsh++) {
                shls[2] = ksh;
                shls[3] = lsh;
                INTOR_AND_CONTRACT;
        } }
}

/* (ij) symmetry only: skip ish < jsh, full kl rectangle for the rest. */
void CVHFdot_nrs2ij(int (*intor)(), JKOperator **jkop, JKArray **vjk,
                    double **dms, double *buf, int n_dm, int ish, int jsh,
                    CVHFOpt *vhfopt, IntorEnvs *envs)
{
        if (ish >= jsh) {
                CVHFdot_nrs1(intor, jkop, vjk, dms, buf, n_dm,
                             ish, jsh, vhfopt, envs);
        }
}

/* (kl) symmetry only: every (ish,jsh) pair, triangular kl loop. */
void CVHFdot_nrs2kl(int (*intor)(), JKOperator **jkop, JKArray **vjk,
                    double **dms, double *buf, int n_dm, int ish, int jsh,
                    CVHFOpt *vhfopt, IntorEnvs *envs)
{
        dot_nrs2sub(intor, jkop, vjk, dms, buf, n_dm,
                    ish, jsh, vhfopt, envs);
}

/* 4-fold symmetry: (ij) and (kl) permutational symmetry combined. */
void CVHFdot_nrs4(int (*intor)(), JKOperator **jkop, JKArray **vjk,
                  double **dms, double *buf, int n_dm, int ish, int jsh,
                  CVHFOpt *vhfopt, IntorEnvs *envs)
{
        if (ish >= jsh) {
                dot_nrs2sub(intor, jkop, vjk, dms, buf, n_dm,
                            ish, jsh, vhfopt, envs);
        }
}

/* 8-fold symmetry: (ij), (kl) and (ij)<->(kl) permutational symmetry. */
void CVHFdot_nrs8(int (*intor)(), JKOperator **jkop, JKArray **vjk,
                  double **dms, double *buf, int n_dm, int ish, int jsh,
                  CVHFOpt *vhfopt, IntorEnvs *envs)
{
        if (ish < jsh) {
                return;
        }

        DECLARE_ALL;
        const int ksh0 = shls_slice[4];
        const int lsh0 = shls_slice[6];

        // to make fjk compatible to C-contiguous dm array, put ksh, lsh inner loop
        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh <= ish; ksh++) {
        for (lsh = lsh0; lsh <= ksh; lsh++) {
/* when ksh==ish, (lsh<jsh) misses some integrals (eg k<i&&l>j).
 * These integrals are calculated in the next (ish,jsh) pair. To show
 * that, we just need to prove that every elements in shell^4 appeared
 * only once in fjk_s8.
 */
                if ((ksh == ish) && (lsh > jsh)) {
                        break;
                }
                shls[2] = ksh;
                shls[3] = lsh;
                INTOR_AND_CONTRACT;
        } }
}

/* Accumulate one thread-private JKArray into the shared output matrix vjk.
 * Only shell blocks that were actually touched (outptr != NOVALUE) are
 * copied; vrow/vcol describe the full output matrix per component. */
static void assemble_v(double *vjk, JKArray *jkarray, int *ao_loc)
{
        int ish0 = jkarray->v_bra_sh0;
        int ish1 = jkarray->v_bra_sh1;
        int jsh0 = jkarray->v_ket_sh0;
        int jsh1 = jkarray->v_ket_sh1;
        int njsh = jsh1 - jsh0;
        size_t vrow = jkarray->v_dims[0];
        size_t vcol = jkarray->v_dims[1];
        int ncomp = jkarray->ncomp;
        int voffset = ao_loc[ish0] * vcol + ao_loc[jsh0];
        int i, j, ish, jsh;
        int di, dj, icomp;
        int optr;
        double *data, *pv;

        for (ish = ish0; ish < ish1; ish++) {
        for (jsh = jsh0; jsh < jsh1; jsh++) {
                optr = jkarray->outptr[ish*njsh+jsh-jkarray->offset0_outptr];
                if (optr != NOVALUE) {
                        di = ao_loc[ish+1] - ao_loc[ish];
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        data = jkarray->data + optr;
                        pv = vjk + ao_loc[ish]*vcol+ao_loc[jsh] - voffset;
                        for (icomp = 0; icomp < ncomp; icomp++) {
                                for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        pv[i*vcol+j] += data[i*dj+j];
                                } }
                                pv += vrow * vcol;
                                data += di * dj;
                        }
                }
        } }
}

/*
 * drv loop over ij, generate eris of kl for given ij, call fjk to
 * calculate vj, vk.
 *
 * n_dm is the number of dms for one [array(ij|kl)], it is also the size of dms and vjk
 * ncomp is the number of components that produced by intor
 * shls_slice = [ishstart, ishend, jshstart, jshend, kshstart, kshend, lshstart, lshend]
 *
 * ao_loc[i+1] = ao_loc[i] + CINTcgto_spheric(i, bas) for i = 0..nbas
 *
 * Return [(ptr[ncomp,nao,nao] in C-contiguous) for ptr in vjk]
 */
void CVHFnr_direct_drv(int (*intor)(), void (*fdot)(), JKOperator **jkop,
                       double **dms, double **vjk,
                       int n_dm, int ncomp, int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt, CVHFOpt *vhfopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        IntorEnvs envs = {natm, nbas, atm, bas, env, shls_slice, ao_loc, NULL,
                          cintopt, ncomp};

        /* Zero the outputs before the parallel accumulation. */
        int idm;
        size_t size;
        for (idm = 0; idm < n_dm; idm++) {
                size = jkop[idm]->data_size(shls_slice, ao_loc) * ncomp;
                memset(vjk[idm], 0, sizeof(double)*size);
        }

        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 4);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 4,
                                                 atm, natm, bas, nbas, env);

#pragma omp parallel default(none) \
        shared(intor, fdot, jkop, ao_loc, shls_slice, \
               dms, vjk, n_dm, ncomp, nbas, vhfopt, envs)
{
        int i, j, ij, ij1;
        /* Per-thread accumulators; merged under the critical section below. */
        JKArray *v_priv[n_dm];
        for (i = 0; i < n_dm; i++) {
                v_priv[i] = jkop[i]->allocate(shls_slice, ao_loc, ncomp);
        }
        /* Integral buffer followed by the engine's scratch cache. */
        double *buf = malloc(sizeof(double) * (di*di*di*di*ncomp + cache_size));
#pragma omp for nowait schedule(dynamic, 1)
        for (ij = 0; ij < nish*njsh; ij++) {
                /* Iterate in reverse so the heaviest (largest-index) shell
                 * pairs are dispatched first under dynamic scheduling. */
                ij1 = nish*njsh-1 - ij;
//                if (ij % 2) {
///* interlace the iteration to balance memory usage
// * map [0,1,2...,N] to [0,N,1,N-1,...] */
//                        ij1 = nish*njsh-1 - ij/2;
//                } else {
//                        ij1 = ij / 2;
//                }
                i = ij1 / njsh + ish0;
                j = ij1 % njsh + jsh0;
                (*fdot)(intor, jkop, v_priv, dms, buf, n_dm, i, j,
                        vhfopt, &envs);
        }
#pragma omp critical
{
        for (i = 0; i < n_dm; i++) {
                assemble_v(vjk[i], v_priv[i], ao_loc);
                jkop[i]->deallocate(v_priv[i]);
        }
}
        free(buf);
}
}
ewem.c
#include "seismic.h"
#include "ewem.h"

void ewem(float **ux, float **uy, float **uz,
          float **mpp, float **mps1, float **mps2,
          float **wav,
          int nt, float ot, float dt,
          int nmx, float omx, float dmx,
          int nmy, float omy, float dmy,
          float sx, float sy,
          int nz, float oz, float dz, float gz, float sz,
          float **vp, float **vs,
          float fmin, float fmax,
          int padt, int padx,
          bool adj, bool verbose)
/*< elastic wave equation depth migration operator.
    adj=true:  data (ux,uy,uz) -> images (mpp,mps1,mps2)  [migration]
    adj=false: images -> data                             [demigration]
    The non-adjoint input/output arrays are zeroed on entry. >*/
{
    int iz,ix,imx,imy,igx,igy,ik,iw,it,nw,nkx,nky,ntfft;
    float dw,dkx,dky;
    int ifmin,ifmax;
    float *d_t;
    complex *d_w;
    complex **ux_g_wx,**uy_g_wx,**uz_g_wx,**u_s_wx;
    fftwf_complex *a,*b;
    int *n;
    fftwf_plan p1,p2;
    float *po_p,**pd_p;
    float *po_s,**pd_s;
    float progress;
    int ithread,nthread;
    float max_source;
    float **mpp_threads,**mps1_threads,**mps2_threads;

    /* zero the output of whichever direction we are computing */
    if (adj){
        for (ix=0;ix<nmx*nmy;ix++) for (iz=0;iz<nz;iz++) mpp[ix][iz] = 0.;
        for (ix=0;ix<nmx*nmy;ix++) for (iz=0;iz<nz;iz++) mps1[ix][iz] = 0.;
        for (ix=0;ix<nmx*nmy;ix++) for (iz=0;iz<nz;iz++) mps2[ix][iz] = 0.;
    }
    else{
        for (ix=0;ix<nmx*nmy;ix++) for (it=0;it<nt;it++) ux[ix][it] = 0.;
        for (ix=0;ix<nmx*nmy;ix++) for (it=0;it<nt;it++) uy[ix][it] = 0.;
        for (ix=0;ix<nmx*nmy;ix++) for (it=0;it<nt;it++) uz[ix][it] = 0.;
    }

    /* padded FFT sizes and sampling intervals in (w,kx,ky) */
    ntfft = (int) 2*truncf(padt*((float) nt)/2);
    nw = (int) truncf(ntfft/2)+1;
    nkx = nmx > 1 ? padx*nmx : 1;
    nky = nmy > 1 ? padx*nmy : 1;  /* NOTE(review): uses padx for y too; presumably intentional reuse — confirm */
    dkx = 2*PI/((float) nkx)/dmx;
    dky = 2*PI/((float) nky)/dmy;
    dw = 2*PI/((float) ntfft)/dt;
    /* frequency band [ifmin,ifmax) actually processed */
    if(fmax*dt*ntfft+1<nw) ifmax = trunc(fmax*dt*ntfft)+1;
    else ifmax = nw;
    if(fmin*dt*ntfft+1<ifmax) ifmin = trunc(fmin*dt*ntfft);
    else ifmin = 0;

    ux_g_wx = alloc2complex(nw,nmx*nmy);
    uy_g_wx = alloc2complex(nw,nmx*nmy);
    uz_g_wx = alloc2complex(nw,nmx*nmy);
    u_s_wx = alloc2complex(nw,nmx*nmy);
    d_t = alloc1float(nt);
    d_w = alloc1complex(nw);
    for (it=0;it<nt;it++) d_t[it] = 0.;
    for (iw=0;iw<nw;iw++) d_w[iw] = 0.;

    /* decompose slowness into layer average, and layer purturbation */
    po_p = alloc1float(nz);
    pd_p = alloc2float(nz,nmx*nmy);
    for (iz=0;iz<nz;iz++){
        po_p[iz] = 0.;
        for (ix=0;ix<nmx*nmy;ix++) po_p[iz] += vp[ix][iz];
        po_p[iz] /= (float) nmx*nmy;
        po_p[iz] = 1./po_p[iz];
        for (ix=0;ix<nmx*nmy;ix++) pd_p[ix][iz] = 1.0/vp[ix][iz] - po_p[iz];
    }
    po_s = alloc1float(nz);
    pd_s = alloc2float(nz,nmx*nmy);
    for (iz=0;iz<nz;iz++){
        po_s[iz] = 0.;
        for (ix=0;ix<nmx*nmy;ix++) po_s[iz] += vs[ix][iz];
        po_s[iz] /= (float) nmx*nmy;
        po_s[iz] = 1./po_s[iz];
        for (ix=0;ix<nmx*nmy;ix++) pd_s[ix][iz] = 1.0/vs[ix][iz] - po_s[iz];
    }

    /* set up fftw plans and pass them to the OMP region of the code
     * (planned with FFTW_MEASURE, which may scribble on a/b, so both are
     * zeroed and executed once here before real use) */
    a = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
    b = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
    n = alloc1int(2);
    n[0] = nkx;
    n[1] = nky;
    p1 = fftwf_plan_dft(2, n, a, a, FFTW_FORWARD, FFTW_MEASURE);
    p2 = fftwf_plan_dft(2, n, b, b, FFTW_BACKWARD, FFTW_MEASURE);
    for (ik=0;ik<nkx*nky;ik++){
        a[ik] = 0.;
        b[ik] = 0.;
    }
    fftwf_execute_dft(p1,a,a);
    fftwf_execute_dft(p2,b,b);
    /**********************************************************************/
    igx = (int) truncf((sx - omx)/dmx); /*position to inject source in x-dir*/
    igy = (int) truncf((sy - omy)/dmy); /*position to inject source in y-dir*/
    //fprintf(stderr,"adj=%d igx=%d\n",adj,igx);
    //fprintf(stderr,"adj=%d igy=%d\n",adj,igy);

    /* source wavefield: wavelet transformed to frequency and injected at
     * the (igx,igy) grid position */
    for (ix=0;ix<nmx*nmy;ix++) for (iw=0;iw<nw;iw++) u_s_wx[ix][iw] = 0.;
    for (it=0;it<nt;it++) d_t[it] = wav[0][it];
    f_op(d_w,d_t,nw,nt,1); /* d_t to d_w */
    for (iw=0;iw<nw;iw++) u_s_wx[igx*nmy + igy][iw] = d_w[iw];

    /* receiver wavefield: band-limited to [ifmin,ifmax) in the adjoint,
     * zero-initialized for the forward (it is synthesized later) */
    if (adj){
        for (ix=0;ix<nmx*nmy;ix++){
            // x component
            for (it=0;it<nt;it++) d_t[it] = ux[ix][it];
            f_op(d_w,d_t,nw,nt,1); /* d_t to d_w */
            for (iw=0;iw<ifmin;iw++) ux_g_wx[ix][iw] = 0.;
            for (iw=ifmin;iw<ifmax;iw++) ux_g_wx[ix][iw] = d_w[iw];
            for (iw=ifmax;iw<nw;iw++) ux_g_wx[ix][iw] = 0.;
            // y component
            for (it=0;it<nt;it++) d_t[it] = uy[ix][it];
            f_op(d_w,d_t,nw,nt,1); /* d_t to d_w */
            for (iw=0;iw<ifmin;iw++) uy_g_wx[ix][iw] = 0.;
            for (iw=ifmin;iw<ifmax;iw++) uy_g_wx[ix][iw] = d_w[iw];
            for (iw=ifmax;iw<nw;iw++) uy_g_wx[ix][iw] = 0.;
            // z component
            for (it=0;it<nt;it++) d_t[it] = uz[ix][it];
            f_op(d_w,d_t,nw,nt,1); /* d_t to d_w */
            for (iw=0;iw<ifmin;iw++) uz_g_wx[ix][iw] = 0.;
            for (iw=ifmin;iw<ifmax;iw++) uz_g_wx[ix][iw] = d_w[iw];
            for (iw=ifmax;iw<nw;iw++) uz_g_wx[ix][iw] = 0.;
        }
    }
    else{
        for (ix=0;ix<nmx*nmy;ix++){
            for (iw=0;iw<nw;iw++){
                ux_g_wx[ix][iw] = 0.;
                uy_g_wx[ix][iw] = 0.;
                uz_g_wx[ix][iw] = 0.;
            }
        }
    }

    /* peak of the (FFT-normalized) wavelet, used to scale the source term */
    max_source = 0.;
    for (it=0;it<nt;it++) if (max_source < fabsf(wav[0][it])/sqrtf((float) ntfft)) max_source = fabsf(wav[0][it])/sqrtf((float) ntfft);

    nthread = omp_thread_count();
    //fprintf(stderr,"nthread=%d\n",nthread);

    /* adjoint: one private image copy per thread (reduced after the loop);
     * forward: a single shared copy of the input images */
    if (adj){
        mpp_threads = alloc2float(nz,nmx*nmy*nthread);
        mps1_threads = alloc2float(nz,nmx*nmy*nthread);
        mps2_threads = alloc2float(nz,nmx*nmy*nthread);
        for (imx=0;imx<nmx;imx++){
            for (imy=0;imy<nmy;imy++){
                for (ithread=0;ithread<nthread;ithread++){
                    for (iz=0;iz<nz;iz++){
                        mpp_threads[imx*nmy*nthread + imy*nthread + ithread][iz] = 0.;
                        mps1_threads[imx*nmy*nthread + imy*nthread + ithread][iz] = 0.;
                        mps2_threads[imx*nmy*nthread + imy*nthread + ithread][iz] = 0.;
                    }
                }
            }
        }
    }
    else{
        mpp_threads = alloc2float(nz,nmx*nmy);
        mps1_threads = alloc2float(nz,nmx*nmy);
        mps2_threads = alloc2float(nz,nmx*nmy);
        for (imx=0;imx<nmx;imx++){
            for (imy=0;imy<nmy;imy++){
                for (iz=0;iz<nz;iz++){
                    mpp_threads[imx*nmy + imy][iz] = mpp[imx*nmy + imy][iz];
                    mps1_threads[imx*nmy + imy][iz] = mps1[imx*nmy + imy][iz];
                    mps2_threads[imx*nmy + imy][iz] = mps2[imx*nmy + imy][iz];
                }
            }
        }
    }

    /* extrapolate one frequency per loop iteration, in parallel
     * (progress is a shared float updated without atomics: display only) */
    progress = 0.;
#pragma omp parallel for private(iw) shared(mpp_threads,mps1_threads,mps2_threads,ux_g_wx,uy_g_wx,uz_g_wx,u_s_wx)
    for (iw=ifmin;iw<ifmax;iw++){
        progress += 1./((float) ifmax - ifmin);
        if (verbose) progress_msg(progress);
        elastic_extrap1f(mpp_threads,mps1_threads,mps2_threads,
                         ux_g_wx,uy_g_wx,uz_g_wx,
                         u_s_wx,
                         max_source,iw,nw,ifmax,ntfft,
                         dw,dkx,dky,nkx,nky,
                         nz,oz,dz,gz,sz,nmx,omx,dmx,nmy,omy,dmy,
                         nthread,
                         vp,po_p,pd_p,vs,po_s,pd_s,
                         p1,p2,adj,verbose);
    }
    if (verbose) fprintf(stderr,"\n");

    if (adj){
        // reduction over parallel axis
        for (imx=0;imx<nmx;imx++){
            for (imy=0;imy<nmy;imy++){
                for (ithread=0;ithread<nthread;ithread++){
                    for (iz=0;iz<nz;iz++){
                        mpp[imx*nmy + imy][iz] += mpp_threads[imx*nmy*nthread + imy*nthread + ithread][iz];
                        mps1[imx*nmy + imy][iz] += mps1_threads[imx*nmy*nthread + imy*nthread + ithread][iz];
                        mps2[imx*nmy + imy][iz] += mps2_threads[imx*nmy*nthread + imy*nthread + ithread][iz];
                    }
                }
            }
        }
    }
    else{
        /* inverse-transform the synthesized receiver wavefields to time */
        for (ix=0;ix<nmx*nmy;ix++){
            // x component
            for (iw=0;iw<ifmin;iw++) d_w[iw] = 0.;
            for (iw=ifmin;iw<ifmax;iw++) d_w[iw] = ux_g_wx[ix][iw];
            for (iw=ifmax;iw<nw;iw++) d_w[iw] = 0.;
            f_op(d_w,d_t,nw,nt,0); /* d_w to d_t */
            for (it=0;it<nt;it++) ux[ix][it] = d_t[it];
            // y component
            for (iw=0;iw<ifmin;iw++) d_w[iw] = 0.;
            for (iw=ifmin;iw<ifmax;iw++) d_w[iw] = uy_g_wx[ix][iw];
            for (iw=ifmax;iw<nw;iw++) d_w[iw] = 0.;
            f_op(d_w,d_t,nw,nt,0); /* d_w to d_t */
            for (it=0;it<nt;it++) uy[ix][it] = d_t[it];
            // z component
            for (iw=0;iw<ifmin;iw++) d_w[iw] = 0.;
            for (iw=ifmin;iw<ifmax;iw++) d_w[iw] = uz_g_wx[ix][iw];
            for (iw=ifmax;iw<nw;iw++) d_w[iw] = 0.;
            f_op(d_w,d_t,nw,nt,0); /* d_w to d_t */
            for (it=0;it<nt;it++) uz[ix][it] = d_t[it];
        }
    }

    free1int(n);
    fftwf_free(a);
    fftwf_free(b);
    fftwf_destroy_plan(p1);
    fftwf_destroy_plan(p2);
    free1float(d_t);
    free1complex(d_w);
    free2complex(ux_g_wx);
    free2complex(uy_g_wx);
    free2complex(uz_g_wx);
    free2complex(u_s_wx);
    free1float(po_p);
    free2float(pd_p);
    free1float(po_s);
    free2float(pd_s);
    free2float(mpp_threads);
    free2float(mps1_threads);
    free2float(mps2_threads);

    return;
}

void elastic_extrap1f(float **mpp, float **mps1, float **mps2,
                      complex **ux_g_wx, complex **uy_g_wx, complex **uz_g_wx,
                      complex **u_s_wx,
                      float max_source, int iw, int nw, int ifmax, int ntfft,
                      float dw, float dkx, float dky, int nkx, int nky,
                      int nz, float oz, float dz, float gz, float sz,
                      int nmx, float omx, float dmx,
                      int nmy, float omy, float dmy,
                      int nthread,
                      float **vp, float *po_p, float **pd_p,
                      float **vs, float *po_s, float **pd_s,
                      fftwf_plan p1, fftwf_plan p2,
                      bool adj, bool verbose)
/*< extrapolate 1 frequency.
    Runs inside the OMP loop of ewem(); in the adjoint the image is
    accumulated into this thread's private slice (index ithread). >*/
{
    float w,factor,z;
    int iz,ix,imx,imy,ithread;
    complex *ux_xg,*uy_xg,*uz_xg;
    complex *up_xg,*us1_xg,*us2_xg;
    complex *up_xs;
    complex **smig;

    ithread = omp_get_thread_num();
    //fprintf(stderr,"ithread=%d\n",ithread);

    ux_xg = alloc1complex(nmx*nmy);
    uy_xg = alloc1complex(nmx*nmy);
    uz_xg = alloc1complex(nmx*nmy);
    up_xg = alloc1complex(nmx*nmy);
    us1_xg = alloc1complex(nmx*nmy);
    us2_xg = alloc1complex(nmx*nmy);
    up_xs = alloc1complex(nmx*nmy);
    smig = alloc2complex(nz,nmx*nmy);
    for (ix=0;ix<nmx*nmy;ix++) ux_xg[ix] = 0.;
    for (ix=0;ix<nmx*nmy;ix++) uy_xg[ix] = 0.;
    for (ix=0;ix<nmx*nmy;ix++) uz_xg[ix] = 0.;
    for (ix=0;ix<nmx*nmy;ix++) up_xg[ix] = 0.;
    for (ix=0;ix<nmx*nmy;ix++) us1_xg[ix] = 0.;
    for (ix=0;ix<nmx*nmy;ix++) us2_xg[ix] = 0.;
    for (ix=0;ix<nmx*nmy;ix++) up_xs[ix] = 0.;

    /* factor 2 accounts for the conjugate-symmetric negative frequencies
     * (iw==0 has no mirror) */
    if (iw==0) factor = 1.;
    else factor = 2.;

    w = iw*dw;
    for (ix=0;ix<nmx*nmy;ix++){
        up_xs[ix] = u_s_wx[ix][iw]/sqrtf((float) ntfft);
        ux_xg[ix] = ux_g_wx[ix][iw]/sqrtf((float) ntfft);
        uy_xg[ix] = uy_g_wx[ix][iw]/sqrtf((float) ntfft);
        uz_xg[ix] = uz_g_wx[ix][iw]/sqrtf((float) ntfft);
    }
    /* NOTE(review): repeats the up_xs assignment from the loop above */
    for (ix=0;ix<nmx*nmy;ix++) up_xs[ix] = u_s_wx[ix][iw]/sqrtf((float) ntfft);

    for (iz=0;iz<nz;iz++){
        // extrapolate source wavefield (downward continuation, stored per depth)
        z = oz + dz*iz;
        if (z >= sz){
            ssop(up_xs,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,vp,po_p,pd_p,p1,p2,true,true,verbose);
            for (ix=0;ix<nmx*nmy;ix++) smig[ix][iz] = up_xs[ix]/max_source;
        }
        else{
            for (ix=0;ix<nmx*nmy;ix++) smig[ix][iz] = 0.;
        }
    }

    if (adj){
        for (iz=0;iz<nz;iz++){
            // extrapolate receiver wavefield downward, imaging each depth
            z = oz + dz*iz;
            if (z >= gz){
                /* split components into P/S potentials, continue each with its
                 * own velocity, then recombine */
                elastic_separate_3d(ux_xg,uy_xg,uz_xg,up_xg,us1_xg,us2_xg,w,dkx,nkx,nmx,omx,dmx,dky,nky,nmy,omy,dmy,1./po_p[iz],1./po_s[iz],p1,p2,true,adj);
                ssop(up_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,dz,iz,vp,po_p,pd_p,p1,p2,true,false,verbose);
                ssop(us1_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,dz,iz,vs,po_s,pd_s,p1,p2,true,false,verbose);
                ssop(us2_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,dz,iz,vs,po_s,pd_s,p1,p2,true,false,verbose);
                elastic_separate_3d(ux_xg,uy_xg,uz_xg,up_xg,us1_xg,us2_xg,w,dkx,nkx,nmx,omx,dmx,dky,nky,nmy,omy,dmy,1./po_p[iz],1./po_s[iz],p1,p2,false,adj);
                /* cross-correlation imaging condition against the source field */
                for (imx=0;imx<nmx;imx++){
                    for (imy=0;imy<nmy;imy++){
                        mpp[imx*nmy*nthread + imy*nthread + ithread][iz] += factor*crealf(conjf(smig[imx*nmy + imy][iz])*up_xg[imx*nmy + imy]);
                        mps1[imx*nmy*nthread + imy*nthread + ithread][iz] += factor*crealf(conjf(smig[imx*nmy + imy][iz])*us1_xg[imx*nmy + imy]);
                        mps2[imx*nmy*nthread + imy*nthread + ithread][iz] += factor*crealf(conjf(smig[imx*nmy + imy][iz])*us2_xg[imx*nmy + imy]);
                    }
                }
            }
        }
    }
    else{
        for (iz=nz-1;iz>=0;iz--){
            // extrapolate receiver wavefield upward, injecting the image terms
            z = oz + dz*iz;
            if (z >= gz){
                elastic_separate_3d(ux_xg,uy_xg,uz_xg,up_xg,us1_xg,us2_xg,w,dkx,nkx,nmx,omx,dmx,dky,nky,nmy,omy,dmy,1./po_p[iz],1./po_s[iz],p1,p2,true,adj);
                for (ix=0;ix<nmx*nmy;ix++) up_xg[ix] += smig[ix][iz]*mpp[ix][iz];
                for (ix=0;ix<nmx*nmy;ix++) us1_xg[ix] += smig[ix][iz]*mps1[ix][iz];
                for (ix=0;ix<nmx*nmy;ix++) us2_xg[ix] += smig[ix][iz]*mps2[ix][iz];
                ssop(up_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,vp,po_p,pd_p,p1,p2,false,false,verbose);
                ssop(us1_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,vs,po_s,pd_s,p1,p2,false,false,verbose);
                ssop(us2_xg,w,dkx,dky,nkx,nky,nmx,omx,dmx,nmy,omy,dmy,-dz,iz,vs,po_s,pd_s,p1,p2,false,false,verbose);
                elastic_separate_3d(ux_xg,uy_xg,uz_xg,up_xg,us1_xg,us2_xg,w,dkx,nkx,nmx,omx,dmx,dky,nky,nmy,omy,dmy,1./po_p[iz],1./po_s[iz],p1,p2,false,adj);
            }
        }
        for (ix=0;ix<nmx*nmy;ix++){
            ux_g_wx[ix][iw] = ux_xg[ix]/sqrtf((float) ntfft);
            uy_g_wx[ix][iw] = uy_xg[ix]/sqrtf((float) ntfft);
            uz_g_wx[ix][iw] = uz_xg[ix]/sqrtf((float) ntfft);
        }
    }

    free1complex(ux_xg);
    free1complex(uy_xg);
    free1complex(uz_xg);
    free1complex(up_xg);
    free1complex(us1_xg);
    free1complex(us2_xg);
    free1complex(up_xs);
    free2complex(smig);

    return;
}

/* Split-step phase-shift extrapolation of one monochromatic wavefield d_x
 * over one depth step dz (sign of dz selects direction). po/pd are the
 * reference slowness and lateral slowness perturbation; the perturbation
 * phase ("SS operator") is applied in x before (forward) or after (adjoint)
 * the k-domain reference phase shift. Evanescent energy is damped. */
void ssop(complex *d_x,
          float w, float dkx, float dky, int nkx, int nky,
          int nmx, float omx, float dmx, int nmy, float omy, float dmy,
          float dz, int iz,
          float **v, float *po, float **pd,
          fftwf_plan p1, fftwf_plan p2,
          bool adj, bool src, bool verbose)
{
    float kx,ky,s;
    complex L;
    int ik,ikx,iky,imx,imy;
    complex *d_k;
    fftwf_complex *a,*b;
    int lmx,lmy;

    /* absorbing-taper widths, only for grids large enough to afford them */
    if (nmx>100) lmx=30;
    else lmx=0;
    if (nmy>100) lmy=30;
    else lmy=0;

    a = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
    b = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
    d_k = alloc1complex(nkx*nky);

    if (adj){
        for(imx=0; imx<nkx;imx++){
            for(imy=0; imy<nky;imy++){
                if (imx < nmx && imy < nmy) a[imx*nky + imy] = d_x[imx*nmy + imy];
                else a[imx*nky + imy] = 0.;
            }
        }
    }
    else{
        boundary_condition(d_x,nmx,lmx,nmy,lmy);
        for(imx=0; imx<nkx;imx++){
            for(imy=0; imy<nky;imy++){
                if (imx < nmx && imy < nmy){
                    L = cexpf(I*w*pd[imx*nmy + imy][iz]*dz);
                    a[imx*nky + imy] = d_x[imx*nmy + imy]*L; // SS operator
                }
                else a[imx*nky + imy] = 0.;
            }
        }
    }

    fftwf_execute_dft(p1,a,a);

    /* reference phase shift in the wavenumber domain; evanescent region
     * (s < 0) is exponentially damped instead of propagated */
    for (ikx=0;ikx<nkx;ikx++){
        kx = ikx<nkx/2. ? dkx*ikx : -(dkx*nkx - dkx*ikx);
        for (iky=0;iky<nky;iky++){
            ky = iky<nky/2. ? dky*iky : -(dky*nky - dky*iky);
            s = (w*w)*(po[iz]*po[iz]) - (kx*kx) - (ky*ky);
            if (s>=0) L = cexpf(I*sqrtf(s)*dz);
            else L = cexpf(-0.2*sqrtf(fabsf(s))*fabsf(dz));
            d_k[ikx*nky + iky] = ((complex) a[ikx*nky + iky])*L/sqrtf((float) nkx*nky);
        }
    }
    for(ik=0; ik<nkx*nky;ik++) b[ik] = (fftwf_complex) d_k[ik];

    fftwf_execute_dft(p2,b,b);

    if (adj){
        for(imx=0; imx<nkx;imx++){
            for(imy=0; imy<nky;imy++){
                if (imx < nmx && imy < nmy){
                    L = cexpf(I*w*pd[imx*nmy + imy][iz]*dz);
                    d_x[imx*nmy + imy] = ((complex) b[imx*nky + imy])*L/sqrtf((float) nkx*nky); // SS operator
                }
            }
        }
        boundary_condition(d_x,nmx,lmx,nmy,lmy);
    }
    else{
        for(imx=0; imx<nkx;imx++){
            for(imy=0; imy<nky;imy++){
                if (imx < nmx && imy < nmy){
                    d_x[imx*nmy + imy] = ((complex) b[imx*nky + imy])/sqrtf((float) nkx*nky);
                }
            }
        }
    }

    free1complex(d_k);
    fftwf_free(a);
    fftwf_free(b);

    return;
}

/* 1-D Fourier transform between time (d, length nt) and frequency
 * (m, length nw) with zero padding to ntfft = (nw-1)*2.
 * adj=true: real-to-complex forward; adj=false: complex-to-real inverse. */
void f_op(complex *m, float *d, int nw, int nt, bool adj)
{
    fftwf_complex *out1a,*in1b;
    float *in1a,*out1b;
    int ntfft,it,iw;
    fftwf_plan p1a,p1b;

    ntfft = (nw-1)*2;

    if (adj){ /* data --> model */
        out1a = fftwf_malloc(sizeof(fftwf_complex) * nw);
        in1a = alloc1float(ntfft);
        p1a = fftwf_plan_dft_r2c_1d(ntfft, in1a, (fftwf_complex*)out1a, FFTW_ESTIMATE);
        for(it=0;it<nt;it++) in1a[it] = d[it];
        for(it=nt;it<ntfft;it++) in1a[it] = 0.;
        fftwf_execute(p1a);
        for(iw=0;iw<nw;iw++) m[iw] = out1a[iw];
        fftwf_destroy_plan(p1a);
        fftwf_free(in1a);
        fftwf_free(out1a);
    }
    else{ /* model --> data */
        out1b = alloc1float(ntfft);
        in1b = fftwf_malloc(sizeof(fftwf_complex) * ntfft);
        p1b = fftwf_plan_dft_c2r_1d(ntfft, (fftwf_complex*)in1b, out1b, FFTW_ESTIMATE);
        for(iw=0;iw<nw;iw++) in1b[iw] = m[iw];
        for(iw=nw;iw<ntfft;iw++) in1b[iw] = 0.;
        fftwf_execute(p1b);
        for(it=0;it<nt;it++) d[it] = out1b[it];
        fftwf_destroy_plan(p1b);
        fftwf_free(in1b);
        fftwf_free(out1b);
    }

    return;
}

/* Print a carriage-return progress line (fraction in [0,1]) to stderr. */
void progress_msg(float progress)
{
    fprintf(stderr,"\r[%6.2f%% complete] ",progress*100);
    return;
}

float signf(float a)
/*< sign of a float >*/
{
    float b;
    if (a>0.) b = 1.;
    else if (a<0.) b =-1.;
    else b = 0.;
    return b;
}

float signfnonzero(float a)
/*< sign of a float, if a==0 then gives a value of 1. >*/
{
    float b;
    if (a>=0.) b = 1.;
    else b =-1.;
    return b;
}

/* qsort comparator for floats (ascending). */
int compare (const void * a, const void * b)
{
    float fa = *(const float*) a;
    float fb = *(const float*) b;
    return (fa > fb) - (fa < fb);
}

/* Count OMP threads by summing 1 per thread in a parallel reduction. */
int omp_thread_count()
{
    int n = 0;
#pragma omp parallel reduction(+:n)
    n += 1;
    return n;
}

/* Apply a Gaussian taper of width lmx/lmy to the lateral edges of d_x
 * (absorbing boundary for the split-step extrapolation). */
void boundary_condition(complex *d_x, int nmx, int lmx, int nmy, int lmy)
{
    int imx,imy;
    float tmx,tmy;
    tmx = 1.;
    tmy = 1.;
    for (imx=0;imx<nmx;imx++){
        if (imx>=0 && imx<lmx) tmx = expf(-powf(0.015*((float) lmx - imx),2.));
        if (imx>=lmx && imx<=nmx-lmx) tmx = 1.;
        if (imx>nmx-lmx && imx<nmx) tmx = expf(-powf(0.015*((float) imx - nmx + lmx),2.));
        for (imy=0;imy<nmy;imy++){
            if (imy>=0 && imy<lmy) tmy = expf(-powf(0.015*((float) lmy - imy),2.));
            if (imy>=lmy && imy<=nmy-lmy) tmy = 1.;
            if (imy>nmy-lmy && imy<nmy) tmy = expf(-powf(0.015*((float) imy - nmy + lmy),2.));
            d_x[imx*nmy + imy] *= tmx*tmy;
        }
    }
    return;
}

/* Wavefield separation/recombination in the wavenumber domain.
 * sep=true:  components (ux,uy,uz) -> potentials (up,us1,us2)
 * sep=false: potentials -> components
 * adj selects the adjoint of the projection. The y/us1 branch is currently
 * disabled (commented out), so only the x-z plane is treated; near-vertical
 * or low-frequency samples (norm < 0.1 or w < 20) bypass the projection.
 * NOTE(review): ky is never used in the projection — 2D (kx only)
 * separation applied slice-wise; confirm this is intended for 3D. */
void elastic_separate_3d(complex *ux, complex *uy, complex *uz,
                         complex *up, complex *us1, complex *us2,
                         float w,
                         float dkx, int nkx, int nmx, float omx, float dmx,
                         float dky, int nky, int nmy, float omy, float dmy,
                         float vp, float vs,
                         fftwf_plan p1, fftwf_plan p2,
                         bool sep, bool adj)
{
    int imx,imy,ikx,iky,ik;
    fftwf_complex *a,*b;
    complex *ux_k,*uz_k,*up_k,*us2_k;
    float kx,kxp,kxs,kzp,kzs,sp,ss,norm,norm_p,norm_s;

    ux_k = alloc1complex(nkx*nky);
    //uy_k = alloc1complex(nkx*nky);
    uz_k = alloc1complex(nkx*nky);
    up_k = alloc1complex(nkx*nky);
    //us1_k = alloc1complex(nkx*nky);
    us2_k = alloc1complex(nkx*nky);
    a = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);
    b = fftwf_malloc(sizeof(fftwf_complex) * nkx*nky);

    if (sep){ /* separation of wavefield components to wavefield potentials */
        // x-component
        for(imx=0;imx<nkx;imx++){
            for(imy=0;imy<nky;imy++){
                a[imx*nky + imy] = (imx < nmx && imy < nmy) ? ux[imx*nmy + imy] : 0.;
            }
        }
        fftwf_execute_dft(p1,a,a);
        for(ik=0;ik<nkx*nky;ik++) ux_k[ik] = a[ik]/sqrtf(nkx*nky);
        // y-component
        //for(imx=0;imx<nkx;imx++){
        //    for(imy=0;imy<nky;imy++){
        //        a[imx*nky + imy] = (imx < nmx && imy < nmy) ? uy[imx*nmy + imy] : 0.;
        //    }
        //}
        //fftwf_execute_dft(p1,a,a);
        //for(ik=0;ik<nkx*nky;ik++) uy_k[ik] = a[ik]/sqrtf(nkx*nky);
        // z-component
        for(imx=0;imx<nkx;imx++){
            for(imy=0;imy<nky;imy++){
                a[imx*nky + imy] = (imx < nmx && imy < nmy) ? uz[imx*nmy + imy] : 0.;
            }
        }
        fftwf_execute_dft(p1,a,a);
        for(ik=0;ik<nkx*nky;ik++) uz_k[ik] = a[ik]/sqrtf(nkx*nky);

        for (ikx=0;ikx<nkx;ikx++){
            kx = ikx<nkx/2. ? dkx*ikx : -(dkx*nkx - dkx*ikx);
            for (iky=0;iky<nky;iky++){
                /* vertical wavenumbers for P and S at this kx */
                sp = w*w/(vp*vp) - kx*kx;
                ss = w*w/(vs*vs) - kx*kx;
                kzp = sp > 0. ? sqrtf(sp) : 0.;
                kzs = ss > 0. ? sqrtf(ss) : 0.;
                norm_p = sqrtf(kx*kx + kzp*kzp);
                norm_s = sqrtf(kx*kx + kzs*kzs);
                kxp = kx/norm_p;
                kxs = kx/norm_s;
                kzp = kzp/norm_p;
                kzs = kzs/norm_s;
                norm = kxp*kxs + kzp*kzs;
                if (norm >= 0.1 && w >= 20.){
                    if (!adj){
                        up_k[ikx*nky + iky] = ( kxs*ux_k[ikx*nky + iky] + kzs*uz_k[ikx*nky + iky])/norm;
                        us2_k[ikx*nky + iky] = (-kzp*ux_k[ikx*nky + iky] + kxp*uz_k[ikx*nky + iky])/norm;
                    }
                    else{
                        up_k[ikx*nky + iky] = kxp*ux_k[ikx*nky + iky] + kzp*uz_k[ikx*nky + iky];
                        us2_k[ikx*nky + iky] = -kzs*ux_k[ikx*nky + iky] + kxs*uz_k[ikx*nky + iky];
                    }
                }
                else{
                    /* projection ill-conditioned: pass components through */
                    up_k[ikx*nky + iky] = uz_k[ikx*nky + iky];
                    us2_k[ikx*nky + iky] = ux_k[ikx*nky + iky];
                }
            }
        }

        for (ik=0;ik<nkx*nky;ik++) b[ik] = up_k[ik];
        fftwf_execute_dft(p2,b,b);
        for(imx=0; imx<nkx;imx++){
            for(imy=0; imy<nky;imy++){
                if (imx < nmx && imy < nmy) up[imx*nmy + imy] = b[imx*nky + imy]/sqrtf(nkx*nky);
            }
        }
        //for (ik=0;ik<nkx*nky;ik++) b[ik] = us1_k[ik];
        //fftwf_execute_dft(p2,b,b);
        //for(imx=0; imx<nkx;imx++){
        //    for(imy=0; imy<nky;imy++){
        //        if (imx < nmx && imy < nmy) us1[imx*nmy + imy] = b[imx*nky + imy]/sqrtf(nkx*nky);
        //    }
        //}
        for (ik=0;ik<nkx*nky;ik++) b[ik] = us2_k[ik];
        fftwf_execute_dft(p2,b,b);
        for(imx=0; imx<nkx;imx++){
            for(imy=0; imy<nky;imy++){
                if (imx < nmx && imy < nmy) us2[imx*nmy + imy] = b[imx*nky + imy]/sqrtf(nkx*nky);
            }
        }
    }
    else { /* combination of wavefield potentials to wavefield components */
        // p-component
        for(imx=0;imx<nkx;imx++){
            for(imy=0;imy<nky;imy++){
                a[imx*nky + imy] = (imx < nmx && imy < nmy) ? up[imx*nmy + imy] : 0.;
            }
        }
        fftwf_execute_dft(p1,a,a);
        for(ik=0;ik<nkx*nky;ik++) up_k[ik] = a[ik]/sqrtf(nkx*nky);
        // s1-component
        //for(imx=0;imx<nkx;imx++){
        //    for(imy=0;imy<nky;imy++){
        //        a[imx*nky + imy] = (imx < nmx && imy < nmy) ? us1[imx*nmy + imy] : 0.;
        //    }
        //}
        //fftwf_execute_dft(p1,a,a);
        //for(ik=0;ik<nkx*nky;ik++) us1_k[ik] = a[ik]/sqrtf(nkx*nky);
        // s2-component
        for(imx=0;imx<nkx;imx++){
            for(imy=0;imy<nky;imy++){
                a[imx*nky + imy] = (imx < nmx && imy < nmy) ? us2[imx*nmy + imy] : 0.;
            }
        }
        fftwf_execute_dft(p1,a,a);
        for(ik=0;ik<nkx*nky;ik++) us2_k[ik] = a[ik]/sqrtf(nkx*nky);

        for (ikx=0;ikx<nkx;ikx++){
            kx = ikx<nkx/2. ? dkx*ikx : -(dkx*nkx - dkx*ikx);
            for (iky=0;iky<nky;iky++){
                sp = w*w/(vp*vp) - kx*kx;
                ss = w*w/(vs*vs) - kx*kx;
                kzp = sp > 0. ? sqrtf(sp) : 0.;
                kzs = ss > 0. ? sqrtf(ss) : 0.;
                norm_p = sqrtf(kx*kx + kzp*kzp);
                norm_s = sqrtf(kx*kx + kzs*kzs);
                kxp = kx/norm_p;
                kxs = kx/norm_s;
                kzp = kzp/norm_p;
                kzs = kzs/norm_s;
                norm = kxp*kxs + kzp*kzs;
                if (norm >= 0.1 && w >= 20.){
                    if (!adj){
                        ux_k[ikx*nky + iky] = kxp*up_k[ikx*nky + iky] - kzs*us2_k[ikx*nky + iky];
                        uz_k[ikx*nky + iky] = kzp*up_k[ikx*nky + iky] + kxs*us2_k[ikx*nky + iky];
                    }
                    else{
                        ux_k[ikx*nky + iky] = (kxs*up_k[ikx*nky + iky] - kzp*us2_k[ikx*nky + iky])/norm;
                        uz_k[ikx*nky + iky] = (kzs*up_k[ikx*nky + iky] + kxp*us2_k[ikx*nky + iky])/norm;
                    }
                }
                else{
                    ux_k[ikx*nky + iky] = us2_k[ikx*nky + iky];
                    uz_k[ikx*nky + iky] = up_k[ikx*nky + iky];
                }
            }
        }

        for (ik=0;ik<nkx*nky;ik++) b[ik] = ux_k[ik];
        fftwf_execute_dft(p2,b,b);
        for(imx=0; imx<nkx;imx++){
            for(imy=0; imy<nky;imy++){
                if (imx < nmx && imy < nmy) ux[imx*nmy + imy] = b[imx*nky + imy]/sqrtf(nkx*nky);
            }
        }
        //for (ik=0;ik<nkx*nky;ik++) b[ik] = uy_k[ik];
        //fftwf_execute_dft(p2,b,b);
        //for(imx=0; imx<nkx;imx++){
        //    for(imy=0; imy<nky;imy++){
        //        if (imx < nmx && imy < nmy) uy[imx*nmy + imy] = b[imx*nky + imy]/sqrtf(nkx*nky);
        //    }
        //}
        for (ik=0;ik<nkx*nky;ik++) b[ik] = uz_k[ik];
        fftwf_execute_dft(p2,b,b);
        for(imx=0; imx<nkx;imx++){
            for(imy=0; imy<nky;imy++){
                if (imx < nmx && imy < nmy) uz[imx*nmy + imy] = b[imx*nky + imy]/sqrtf(nkx*nky);
            }
        }
    }

    fftwf_free(a);
    fftwf_free(b);
    free1complex(ux_k);
    //free1complex(uy_k);
    free1complex(uz_k);
    free1complex(up_k);
    //free1complex(us1_k);
    free1complex(us2_k);

    return;
}
omp-par.c
#include <stdio.h> #include <time.h> int main() { struct timespec requestStart, requestEnd; clock_gettime(CLOCK_REALTIME, &requestStart); #pragma omp parallel { } clock_gettime(CLOCK_REALTIME, &requestEnd); printf("Parallel region took %d second, and %d nanoseconds\n", requestEnd.tv_sec - requestStart.tv_sec, requestEnd.tv_nsec - requestStart.tv_nsec); #pragma omp parallel { } clock_gettime(CLOCK_REALTIME, &requestEnd); printf("Total of both parallel regions took %d second, and %d nanoseconds\n", requestEnd.tv_sec - requestStart.tv_sec, requestEnd.tv_nsec - requestStart.tv_nsec); return 0; }
stribog_fmt_plug.c
/*
 * GOST R 34.11-2012 cracker patch for JtR. Hacked together during
 * the Hash Runner 2015 contest by Dhiru Kholia and Aleksey Cherepanov.
 *
 * Based on https://www.streebog.net/ and https://github.com/sjinks/php-stribog
 * code. See "LICENSE.gost" for licensing details of the original code.
 */

#include "arch.h"

#if __SSE4_1__

#if FMT_EXTERNS_H
extern struct fmt_main fmt_stribog_256;
extern struct fmt_main fmt_stribog_512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_stribog_256);
john_register_one(&fmt_stribog_512);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "gost3411-2012-sse41.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               512 // XXX
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "stribog"
#define FORMAT_NAME             ""
#define TAG256                  "$stribog256$"
#define TAG256_LENGTH           (sizeof(TAG256)-1)
#define TAG512                  "$stribog512$"
#define TAG512_LENGTH           (sizeof(TAG512)-1)
/* TAG_LENGTH/FORMAT_TAG/CIPHERTEXT_LENGTH are redefined below for the
 * 512-bit variant; the *_256/*_512 helpers rely on which definition is
 * active at their point of definition. */
#define TAG_LENGTH              TAG256_LENGTH
#define FORMAT_TAG              TAG256
#define ALGORITHM_NAME          "GOST R 34.11-2012 128/128 SSE4.1 1x"
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        64 - 1
#define CIPHERTEXT256_LENGTH    64
#define CIPHERTEXT512_LENGTH    128
#define CIPHERTEXT_LENGTH       CIPHERTEXT256_LENGTH
#define BINARY_SIZE_256         32
#define BINARY_SIZE_512         64
#define SALT_SIZE               0
#define SALT_ALIGN              1
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

/* Known hash/plaintext pairs used by JtR's self-test. */
static struct fmt_tests stribog_256_tests[] = {
	{"$stribog256$bbe19c8d2025d99f943a932a0b365a822aa36a4c479d22cc02c8973e219a533f", ""},
	/* {"3f539a213e97c802cc229d474c6aa32a825a360b2a933a949fd925208d9ce1bb", ""}, */
	/* 9d151eefd8590b89daa6ba6cb74af9275dd051026bb149a452fd84e5e57b5500 */
	{"$stribog256$00557be5e584fd52a449b16b0251d05d27f94ab76cbaa6da890b59d8ef1e159d", "012345678901234567890123456789012345678901234567890123456789012"},
	{NULL}
};

static struct fmt_tests stribog_512_tests[] = {
	/* 8e945da209aa869f0455928529bcae4679e9873ab707b55315f56ceb98bef0a7362f715528356ee83cda5f2aac4c6ad2ba3a715c1bcd81cb8e9f90bf4c1c1a8a */
	{"$stribog512$8a1a1c4cbf909f8ecb81cd1b5c713abad26a4cac2a5fda3ce86e352855712f36a7f0be98eb6cf51553b507b73a87e97946aebc29859255049f86aa09a25d948e", ""},
	/* 1b54d01a4af5b9d5cc3d86d68d285462b19abc2475222f35c085122be4ba1ffa00ad30f8767b3a82384c6574f024c311e2a481332b08ef7f41797891c1646f48 */
	{"$stribog512$486f64c1917879417fef082b3381a4e211c324f074654c38823a7b76f830ad00fa1fbae42b1285c0352f227524bc9ab16254288dd6863dccd5b9f54a1ad0541b", "012345678901234567890123456789012345678901234567890123456789012"},
	{NULL}
};

/* Function-local static buffer helpers: either a plain static array or a
 * lazily allocated tiny buffer (the active choice below). */
#define make_full_static_buf(type, var, len) static type (var)[(len)]
#define make_dynamic_static_buf(type, var, len)         \
	static type *var;                               \
	if (!var)                                       \
		var = mem_alloc_tiny((len), MEM_ALIGN_WORD)
#if 1
#define make_static_buf make_dynamic_static_buf
#else
#define make_static_buf make_full_static_buf
#endif

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE_512 / sizeof(ARCH_WORD_32)];

/* Allocate per-candidate key and output buffers, scaled by OMP threads. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	if (!saved_key) {
		saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
		                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	}
	if (!crypt_out)
		crypt_out = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Canonicalize a 256-bit hash: prepend the tag if absent, lowercase hex. */
static char *split_256(char *ciphertext, int index, struct fmt_main *self)
{
	make_static_buf(char, out, TAG_LENGTH + CIPHERTEXT_LENGTH + 1);

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(out + TAG_LENGTH);
	return out;
}

/* Accept an optionally tagged string of exactly 64 hex digits. */
static int valid_256(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;

	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	/* else */
	/*	return 0; */
	if (strlen(p) != CIPHERTEXT_LENGTH)
		return 0;
	while(*p)
		if(atoi16[ARCH_INDEX(*p++)]==0x7f)
			return 0;
	return 1;
}

/* Decode the hex digest to raw bytes, reversed (byte-swapped) order. */
static void *get_binary_256(char *ciphertext)
{
	static unsigned char *out;
	char *p = ciphertext;
	int i;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE_256, MEM_ALIGN_WORD);

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = ciphertext + TAG_LENGTH;
	for (i = 0; i < BINARY_SIZE_256; i++) {
		out[BINARY_SIZE_256 - i - 1] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Switch the helper macros over to the 512-bit variant. */
#undef TAG_LENGTH
#undef FORMAT_TAG
#undef CIPHERTEXT_LENGTH
#define TAG_LENGTH              TAG512_LENGTH
#define FORMAT_TAG              TAG512
#define CIPHERTEXT_LENGTH       CIPHERTEXT512_LENGTH

/* Canonicalize a 512-bit hash: prepend the tag if absent, lowercase hex. */
static char *split_512(char *ciphertext, int index, struct fmt_main *self)
{
	make_static_buf(char, out, TAG_LENGTH + CIPHERTEXT_LENGTH + 1);

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(out + TAG_LENGTH);
	return out;
}

/* Accept an optionally tagged string of exactly 128 hex digits. */
static int valid_512(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;

	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	/* else */
	/*	return 0; */
	if (strlen(p) != CIPHERTEXT_LENGTH)
		return 0;
	while(*p)
		if(atoi16[ARCH_INDEX(*p++)]==0x7f)
			return 0;
	return 1;
}

/* Decode the hex digest to raw bytes, reversed (byte-swapped) order. */
static void *get_binary_512(char *ciphertext)
{
	static unsigned char *out;
	char *p = ciphertext;
	int i;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE_512, MEM_ALIGN_WORD);

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = ciphertext + TAG_LENGTH;
	for (i = 0; i < BINARY_SIZE_512; i++) {
		out[BINARY_SIZE_512 - i - 1] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

#undef TAG_LENGTH
#undef FORMAT_TAG
#undef CIPHERTEXT_LENGTH

/* static int valid_256(char *ciphertext, struct fmt_main *self) */
/* {
*/
/* return valid(ciphertext, self, 64); */
/* } */

/* static int valid_512(char *ciphertext, struct fmt_main *self) */
/* { */
/* return valid(ciphertext, self, 128); */
/* } */

/*
 * get_hash_* return progressively wider low-bit slices of the first 32-bit
 * word of the computed digest, for the cracker's hash-table lookups.
 */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/*
 * The SSE4.1 Streebog core wants a 16-byte-aligned context.  These wrappers
 * take an over-sized, possibly unaligned buffer and round the pointer up to
 * the next 16-byte boundary before treating it as the real context.  The
 * same rounding must be applied consistently by init/update/final on the
 * same buffer.
 */
static void stribog256_init(void* context)
{
	/* Bytes from `context` up to the next 16-byte boundary (0..15). */
	size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context;
	void *ctx = (char*)context + offset;

	GOST34112012Init(ctx, 256);
}

static void stribog512_init(void* context)
{
	size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context;
	void *ctx = (char*)context + offset;

	GOST34112012Init(ctx, 512);
}

/* Feed `count` bytes of `buf` into the hash.  If `buf` itself is not
 * 16-byte aligned, the leading misaligned bytes are bounced through an
 * aligned temporary first. */
static void stribog_update(void* context, const unsigned char* buf, unsigned int count)
{
	size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context;
	void *ctx = (char*)context + offset;

	/* Reuse `offset` for the input buffer's misalignment (0..15). */
	offset = (((size_t)buf + 15) & ~0x0F) - (size_t)buf;

	if (!offset) {
		GOST34112012Update(ctx, buf, count);
	} else {
		/* NOTE(review): if count < offset, the unsigned subtraction
		 * below would wrap.  In this file `buf` is always an entry of
		 * the SIMD-aligned saved_key array, so offset is presumably
		 * always 0 here -- confirm before reusing this helper. */
		ALIGN(16) unsigned char tmp[15];

		assert(offset < 16);
		memcpy(tmp, buf, offset);
		GOST34112012Update(ctx, tmp, offset);
		GOST34112012Update(ctx, buf + offset, count - offset);
	}
}

/* Finish the hash and write the digest to `digest`. */
static void stribog_final(unsigned char* digest, void* context)
{
	size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context;
	void *ctx = (char*)context + offset;

	GOST34112012Final(ctx, digest);
}

/* Hash all `count` queued keys with Stribog-256 into crypt_out[].
 * Without OpenMP the braced body runs exactly once, for index 0. */
static int crypt_256(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		/*
		GOST34112012Context ctx;
		GOST34112012Init(&ctx, 256);
		GOST34112012Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
		GOST34112012Final(&ctx, (unsigned char*)crypt_out[index]);
		*/
		/* Two contexts' worth of stack space, so the wrappers can
		 * round the pointer up to a 16-byte boundary inside it.
		 * (&ctx and ctx have the same address; only the types differ.) */
		GOST34112012Context ctx[2]; // alignment stuff
		stribog256_init((void *)ctx);
		stribog_update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
		stribog_final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

/* Hash all `count` queued keys with Stribog-512 into crypt_out[]. */
static int crypt_512(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		/*
		GOST34112012Context ctx;
		GOST34112012Init(&ctx, 512);
		GOST34112012Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
		GOST34112012Final(&ctx, (unsigned char*)crypt_out[index]);
		*/
		GOST34112012Context ctx[2]; // alignment stuff
		stribog512_init((void *)ctx);
		stribog_update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
		stribog_final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

/* Fast scan: does any computed digest match `binary` in its first
 * ARCH_SIZE bytes?  Exact comparison is done later by cmp_one_*.
 * Without OpenMP, max_keys_per_crypt stays 1 (see init), so checking
 * only index 0 suffices and the loop is compiled out. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full 256-bit digest comparison for one candidate. */
static int cmp_one_256(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE_256);
}

/* Full 512-bit digest comparison for one candidate. */
static int cmp_one_512(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE_512);
}

/* The binary comparison above is already exact; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate plaintext, truncated to PLAINTEXT_LENGTH and
 * NUL-terminated. */
static void stribog_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format descriptor wiring the Stribog-256 callbacks into the JtR core. */
struct fmt_main fmt_stribog_256 = {
	{
		"Stribog-256",
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_256,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ TAG256 },
		stribog_256_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_256,
		split_256,
		get_binary_256,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		stribog_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_256,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one_256,
		cmp_exact
	}
};

/* Format descriptor wiring the Stribog-512 callbacks into the JtR core. */
struct fmt_main fmt_stribog_512 = {
	{
		"Stribog-512",
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_512,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ TAG512 },
		stribog_512_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_512,
		split_512,
		get_binary_512,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		stribog_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_512,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one_512,
		cmp_exact
	}
};

#endif /* plugin stanza */

#else

#if !defined(FMT_EXTERNS_H) && !defined(FMT_REGISTERS_H)
#ifdef __GNUC__
#warning Stribog-256 and Stribog-512 formats require SSE 4.1, formats disabled
#elif _MSC_VER
#pragma message(": warning Stribog-256 and Stribog-512 formats require SSE 4.1, formats disabled:")
#endif
#endif

#endif /* __SSE4_1__ */
ast-dump-openmp-target-teams-distribute-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target teams distribute parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target teams distribute parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target teams distribute parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:4:1, col:54> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} 
<line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr 
{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <col:3, line:6:5> // 
CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} 
<col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} 
<line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 
'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:10:1, col:54> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} 
<col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | 
`-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | 
`-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 
'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | 
| | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | 
|-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:17:1, col:66> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt 
{{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 
'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | 
`-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | 
`-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 
'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | 
| | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | 
|-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:24:1, col:66> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt 
{{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | 
| | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 
'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 
'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | 
| | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, 
col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:31:1, col:66> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:55, col:65> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:64> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:64> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, 
line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | 
| `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // 
CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit 
.global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> 
Implicit 9 // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} 
<col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl 
{{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} 
<col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | 
`-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl 
{{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} 
<<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // 
CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} 
<col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
pvmt-OpenMP.c
#include <stdlib.h> #include <stdio.h> #include<time.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif //#define PRINT_ALL //#define VECTOR_GLOBAL #define VECTOR_DYNAMIC #ifdef VECTOR_GLOBAL #define MAX 32768 //=2^10 double v[MAX], m[MAX][MAX], r[MAX]; #endif omp_sched_t charToSchedType (char c) { omp_sched_t t; if (c == 'S') t = omp_sched_static; else if (c == 'D') t = omp_sched_dynamic; else if (c == 'G') t = omp_sched_guided; else if (c == 'A') t = omp_sched_auto; else { printf(" Error en tipo de schedule.\n" " Puede ser (S)tatic - (D)ynamic - (G)uided - (A)uto\n"); exit(-1); } return t; } char* printEnum (omp_sched_t type) { char * ret; if (type == omp_sched_static) ret = "Static"; else if (type == omp_sched_dynamic) ret = "Dynamic"; else if (type == omp_sched_guided) ret = "Guided"; else if (type == omp_sched_auto) ret = "Auto"; return ret; } int main(int argc,char** argv){ if (argc<4){ printf("Error en nº de parámetros. Ejecución:\n %s <Tipo de schedule> <chunk> <iteraciones>\n", argv[0]); printf("El schedule puede ser (S)tatic - (D)ynamic - (G)uided - (A)uto\n"); printf("Introducir chunk = 0 para tomar el valor por defecto según el schedueling.\n"); exit(-1); } omp_sched_t sched_type = charToSchedType(argv[1][0]); int chunk = atoi(argv[2]); if (chunk == 0) { if (sched_type == 'S' || sched_type == 'G') chunk = omp_get_num_threads(); else chunk = 1; } else if (chunk < 0) { chunk = 1; printf("\n Valor de chunk negativo. 
Queda fijado a 1.\n"); } unsigned int N = atoi(argv[3]); // Máximo N =2^32 -1=4294967295 (sizeof(unsigned int) = 4 B) if (N < 1) { printf("Error - Número de iteraciones negativo.\n"); exit(-1); } struct timespec cgt1,cgt2; double ncgt; //para tiempo de ejecución int i, j; omp_set_schedule(sched_type, chunk); /* // Comprobamos que hemos fijado bien el schedueling y el chunk omp_get_schedule(&sched_type, &chunk); printf("\n run-shed-var: Schedule %s --- Chunk = %d\n", printEnum(sched_type), chunk); */ #ifdef VECTOR_GLOBAL if (N>MAX) N=MAX; printf("\n Número de iteraciones refijado al máximo posible: %d\n", N); #endif #ifdef VECTOR_DYNAMIC double *v, **m, *r; v = (double*) malloc(N*sizeof(double)); // malloc necesita el tamaño en bytes m = (double**) malloc(N*sizeof(double*)); //si no hay espacio suficiente malloc devuelve NULL for (i=0; i<N; i++) m[i] = (double*) malloc(N*sizeof(double)); r = (double*) malloc(N*sizeof(double)); if ((v==NULL) || (m==NULL) || (r==NULL)) { printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } #endif //Inicializar vector y matriz #pragma omp parallel for private(i) for (j=0; j<N; j++) { v[j] = 2.5; m[0][j] = 1.1; for (i=1; i<=j; i++) m[i][j] = -m[i-1][j]; for (; i<N; i++) m[i][j] = 0; } //Comprobamos la incialización #ifdef PRINT_ALL printf("\n Vector:\n"); for (i=0; i<N; i++) { printf("\t%0.1f", v[i]); } printf("\n\n Matriz: \n"); for (i=0; i<N; i++) { for (j=0; j<N; j++) printf("\t%0.1f", m[i][j]); printf("\n\n"); } #endif clock_gettime(CLOCK_REALTIME,&cgt1); //Calcular el producto double sum; #pragma omp parallel for private(sum, i) for (j=0; j<N; j++) { sum = 0; for (i=0; i<=j; i++) sum += v[i]*m[i][j]; r[j] = sum; } clock_gettime(CLOCK_REALTIME,&cgt2); ncgt = (double) (cgt2.tv_sec - cgt1.tv_sec) + (double) ((cgt2.tv_nsec - cgt1.tv_nsec)/(1.e+9)); //Imprimir resultado del producto printf("\n Resultado:\n"); #ifdef PRINT_ALL for (i=0; i<N; i++) { printf("\t%0.2f", r[i]); } printf("\n"); #else printf("Primer valor: 
%0.1f \t Último valor: %0.1f \n", r[0], r[N-1]); #endif printf("\n Tiempo de ejecución(s): %11.9f\n\n", ncgt); #ifdef VECTOR_DYNAMIC free(v); // libera el espacio reservado para v free(m); // libera el espacio reservado para m free(r); #endif return 0; }
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelFxImage() applies a channel expression to the specified image. The % expression consists of one or more channels, either mnemonic or numeric (e.g. % red, 1), separated by actions as follows: % % <=> exchange two channels (e.g. red<=>blue) % => copy one channel to another channel (e.g. red=>green) % = assign a constant value to a channel (e.g. red=50%) % , write new image channels in the specified order (e.g. red, green) % | add a new output image for the next set of channel operations % ; move to the next input image for the source of channel data % % For example, to create 3 grayscale images from the red, green, and blue % channels of an image, use: % % -channel-fx "red; green; blue" % % A channel without an operation symbol implies separate (i.e, semicolon). % % The format of the ChannelFxImage method is: % % Image *ChannelFxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A channel expression. 
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Channel operations understood by the channel-fx expression parser (see the
  operator table in the ChannelFxImage() description above).
*/
typedef enum
{
  ExtractChannelOp,   /* bare channel name: extract/separate the channel */
  AssignChannelOp,    /* '='  : assign a constant value to a channel */
  ExchangeChannelOp,  /* '<=>': swap two channels */
  TransferChannelOp   /* '=>' : copy one channel onto another */
} ChannelFx;

/*
  ChannelImage() writes a single channel of destination_image row by row over
  the region common to both images: AssignChannelOp stores the constant
  `pixel'; every other op copies source_channel of source_image.  Rows whose
  pixels cannot be read or synced set the shared status to MagickFalse, which
  is also the return value on failure.
*/
static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  size_t
    height,
    width;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source_image,exception);
  destination_view=AcquireAuthenticCacheView(destination_image,exception);
  /* Only the overlap of the two images is processed. */
  height=MagickMin(source_image->rows,destination_image->rows);
  width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelTrait
      destination_traits,
      source_traits;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining rows */
    p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(destination_view,0,y,
      destination_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    destination_traits=GetPixelChannelTraits(destination_image,
      destination_channel);
    source_traits=GetPixelChannelTraits(source_image,source_channel);
    if ((destination_traits == UndefinedPixelTrait) ||
        (source_traits == UndefinedPixelTrait))
      continue;  /* requested channel not present in one of the images */
    for (x=0; x < (ssize_t) width; x++)
    {
      if (channel_op == AssignChannelOp)
        SetPixelChannel(destination_image,destination_channel,pixel,q);
      else
        SetPixelChannel(destination_image,destination_channel,
          GetPixelChannel(source_image,source_channel,p),q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

/*
  ChannelFxImage() parses `expression' (grammar documented in the comment
  block above) and builds a new image list by applying each channel
  operation via ChannelImage().  Returns the head of the new list, or NULL
  on a fatal error; parse errors are reported through `exception' and the
  partially-built list is destroyed.
*/
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag  "ChannelFx/Image"

  ChannelFx
    channel_op;

  ChannelType
    channel_mask;

  char
    token[MagickPathExtent];

  const char
    *p;

  const Image
    *source_image;

  double
    pixel;

  Image
    *destination_image;

  MagickBooleanType
    status;

  PixelChannel
    source_channel,
    destination_channel;

  ssize_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  if (expression == (const char *) NULL)
    return(destination_image);  /* no expression: plain clone */
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      destination_image=GetLastImageInList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;

    /*
      Interpret channel expression.
    */
    switch (*token)
    {
      case ',':
      {
        /* ',' separates channels within the same output image. */
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case '|':
      {
        /* '|' advances to the next input image (wrapping to the first). */
        if (GetNextImageInList(source_image) != (Image *) NULL)
          source_image=GetNextImageInList(source_image);
        else
          source_image=GetFirstImageInList(source_image);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case ';':
      {
        Image
          *canvas;

        /* ';' finalizes the current output image and starts a new one. */
        (void) SetPixelChannelMask(destination_image,channel_mask);
        if ((channel_op == ExtractChannelOp) && (channels == 1))
          {
            /* a single extracted channel becomes a grayscale image */
            (void) SetPixelMetaChannels(destination_image,0,exception);
            (void) SetImageColorspace(destination_image,GRAYColorspace,
              exception);
          }
        canvas=CloneImage(source_image,0,0,MagickTrue,exception);
        if (canvas == (Image *) NULL)
          {
            destination_image=DestroyImageList(destination_image);
            return(destination_image);
          }
        AppendImageToList(&destination_image,canvas);
        destination_image=GetLastImageInList(destination_image);
        status=SetImageStorageClass(destination_image,DirectClass,exception);
        if (status == MagickFalse)
          {
            destination_image=GetLastImageInList(destination_image);
            return((Image *) NULL);
          }
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        channels=0;
        destination_channel=RedPixelChannel;
        channel_mask=UndefinedChannel;
        break;
      }
      default:
        break;
    }
    /* The current token must now be a (source) channel name. */
    i=ParsePixelChannelOption(token);
    if (i < 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedChannelType","`%s'",token);
        destination_image=DestroyImageList(destination_image);
        return(destination_image);
      }
    source_channel=(PixelChannel) i;
    channel_op=ExtractChannelOp;
    (void) GetNextToken(p,&p,MagickPathExtent,token);
    /* The three ifs below consume '<', '=', '>' in sequence, so "<=>"
       yields ExchangeChannelOp, "=>" TransferChannelOp, "=" AssignChannelOp. */
    if (*token == '<')
      {
        channel_op=ExchangeChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '=')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=AssignChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '>')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=TransferChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    switch (channel_op)
    {
      case AssignChannelOp:
      case ExchangeChannelOp:
      case TransferChannelOp:
      {
        if (channel_op == AssignChannelOp)
          pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
        else
          {
            /* exchange/transfer: token names the destination channel */
            i=ParsePixelChannelOption(token);
            if (i < 0)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"UnrecognizedChannelType","`%s'",token);
                destination_image=DestroyImageList(destination_image);
                return(destination_image);
              }
          }
        destination_channel=(PixelChannel) i;
        if (i >= (ssize_t) GetPixelChannels(destination_image))
          (void) SetPixelMetaChannels(destination_image,(size_t) (
            destination_channel-GetPixelChannels(destination_image)+1),
            exception);
        if (image->colorspace != UndefinedColorspace)
          switch (destination_channel)
          {
            case RedPixelChannel:
            case GreenPixelChannel:
            case BluePixelChannel:
            case BlackPixelChannel:
            case IndexPixelChannel:
              break;
            case AlphaPixelChannel:
            {
              destination_image->alpha_trait=BlendPixelTrait;
              break;
            }
            case CompositeMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | CompositeMaskChannel);
              break;
            }
            case ReadMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | ReadMaskChannel);
              break;
            }
            case WriteMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | WriteMaskChannel);
              break;
            }
            case MetaPixelChannel:
            default:
            {
              /* writing past the declared channels: grow the meta channels */
              (void) SetPixelMetaChannels(destination_image,(size_t) (
                destination_channel-GetPixelChannels(destination_image)+1),
                exception);
              break;
            }
          }
        channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
        if (((channels >= 1) || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        /* second half of the swap: copy the other direction too */
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        /* extracted channels land in successive destination channels */
        channel_mask=(ChannelType) (channel_mask |
          (1UL << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      /* a single extracted channel becomes a grayscale image */
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m b i n e I m a g e s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CombineImages() combines one or more images into a single image.  The
%  grayscale value of the pixels of each image in the sequence is assigned in
%  order to the specified channels of the combined image.  The typical
%  ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
%  The format of the CombineImages method is:
%
%      Image *CombineImages(const Image *images,const ColorspaceType colorspace,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o colorspace: the image colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag  "Combine/Image"

  CacheView
    *combine_view;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the images are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
    {
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  /* No explicit colorspace: gamma ~= 1.0 implies linear RGB, else sRGB. */
  if (colorspace != UndefinedColorspace)
    (void) SetImageColorspace(combine_image,colorspace,exception);
  else
    if (fabs(image->gamma-1.0) <= MagickEpsilon)
      (void) SetImageColorspace(combine_image,RGBColorspace,exception);
    else
      (void) SetImageColorspace(combine_image,sRGBColorspace,exception);
  /* Extra input images beyond the colorspace's base channels feed alpha. */
  switch (combine_image->colorspace)
  {
    case UndefinedColorspace:
    case sRGBColorspace:
    {
      if (GetImageListLength(image) > 3)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case LinearGRAYColorspace:
    case GRAYColorspace:
    {
      if (GetImageListLength(image) > 1)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case CMYKColorspace:
    {
      if (GetImageListLength(image) > 4)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    default:
      break;
  }
  /*
    Combine images: channel i of the output row comes from the gray value
    of image i in the input list.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    Quantum
      *pixels;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
    {
      register ssize_t
        x;

      PixelChannel channel = GetPixelChannelChannel(combine_image,i);
      PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
      if (traits == UndefinedPixelTrait)
        continue;
      if (next == (Image *) NULL)
        continue;  /* fewer input images than output channels */
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      if (p == (const Quantum *) NULL)
        continue;  /* NOTE(review): image_view leaks on this path — confirm */
      q=pixels;
      for (x=0; x < (ssize_t) combine_image->columns; x++)
      {
        if (x < (ssize_t) next->columns)
          {
            q[i]=GetPixelGray(next,p);
            p+=GetPixelChannels(next);
          }
        q+=GetPixelChannels(combine_image);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CombineImageTag,progress,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e A l p h a C h a n n e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
%  not activated.  That is, the image is RGB rather than RGBA or CMYK rather
%  than CMYKA.
%
%  The format of the GetImageAlphaChannel method is:
%
%      MagickBooleanType GetImageAlphaChannel(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Alpha is considered active whenever the trait is anything but undefined. */
  return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e p a r a t e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImage() separates a channel from the image and returns it as a
%  grayscale image.
%
%  The format of the SeparateImage method is:
%
%      Image *SeparateImage(const Image *image,const ChannelType channel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the image channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
/* Test whether channel bit `bit' is set in the channel-type mask. */
#define GetChannelBit(mask,bit)  (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag  "Separate/Image"

  CacheView
    *image_view,
    *separate_view;

  Image
    *separate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  /* Result is a single-channel grayscale image with the source gamma. */
  separate_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->gamma=image->gamma;
  /*
    Separate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Default to black, then copy the selected channel's value. */
      SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (GetChannelBit(channel_type,channel) == 0))
          continue;
        /* If several channel bits are set, the last matching one wins. */
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e p a r a t e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImages() returns a separate grayscale image for each channel
%  specified.
%
%  The format of the SeparateImages method is:
%
%      Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception) { Image *images, *separate_image; register ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; separate_image=SeparateImage(image,(ChannelType) (1UL << channel), exception); if (separate_image != (Image *) NULL) AppendImageToList(&images,separate_image); } if (images == (Image *) NULL) images=SeparateImage(image,UndefinedChannel,exception); return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelOption alpha_type,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel, % DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel, % OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, % and TransparentAlphaChannel. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Compose pixel p (with opacity `alpha') over pixel q (with opacity `beta')
  and write the blended channel values into `composite'.  Only the channels
  the image defines are touched; the alpha channel gets the combined
  coverage Sa+Da-Sa*Da.
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  register ssize_t
    i;

  /*
    Compose pixel p over pixel q with the given alpha.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta,
  gamma=Sa*(-Da)+Sa+Da;
  /* PerceptibleReciprocal() avoids a divide-by-zero for fully transparent. */
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}

MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: multiply each updatable color channel by its alpha
        (premultiply).  NOTE(review): this case returns directly, so the
        trailing SetPixelChannelMask/SyncImagePixelCache below is skipped.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          register ssize_t
            i;

          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              /* Replace the color but keep the pixel fully transparent. */
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /* Derive alpha from pixel intensity via an intensity composite. */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha: divide each updatable color channel by its alpha
        (unpremultiply).  NOTE(review): returns directly like Associate.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          register ssize_t
            i;

          Sa=QuantumScale*GetPixelAlpha(image,q);
          /* Reciprocal guards against division by a zero alpha. */
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency: flatten each pixel over the background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* In-place: q is both the "under" pixel and the destination. */
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double)
            GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      PixelInfo
        background;

      /*
        Remove transparency: recolor to the background and derive alpha from
        pixel intensity (the image becomes a "shape" of the background).
      */
      ConformPixelInfo(image,&image->background_color,&background,exception);
      background.alpha_trait=BlendPixelTrait;
      image->alpha_trait=BlendPixelTrait;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        PixelInfo
          pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=background;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.alpha=GetPixelIntensity(image,q);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case TransparentAlphaChannel:
    {
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Only pixels inside the write mask become transparent. */
          if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
            SetPixelAlpha(image,TransparentAlpha,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
/* mixed_tentusscher_myo_epi_2004_S3_4.c */
// Scenario 3 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rc)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S3_4.h"

// Report the model's resting potential and equation count to the framework.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Load per-cell initial conditions.  extra_data must carry a mapping array:
// mapping[sv_id] == 0 selects the myocardium steady state, otherwise the
// epicardium steady state.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;   millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;   millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.7222555857775,0.00124832587159023,0.783462712179595,0.783274260125311,0.000170985504910724,0.486692890449150,0.00290823499179711,0.999998398101156,1.88451728840305e-08,1.85263533592764e-05,0.999776650901527,1.00720674152297,0.999996768057829,4.25428531899888e-05,0.238365528186353,10.2732995493148,139.602683230087};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance every requested cell by num_steps explicit steps of size dt,
// dispatching each cell to the myocardium or epicardium kernel according to
// the mapping array in extra_data.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    // Cells are independent, so the outer loop parallelizes cleanly;
    // sv_id is per-iteration scratch and must be private.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a myocardium cell.  Note: RHS_cpu_myo writes the UPDATED
// state (not derivatives) into rDY, so it is copied straight back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardium kernel: given state sv and the stimulus,
// computes the state after one step of size dt into rDY_ (voltage and
// concentrations via explicit Euler; gates via exponential update toward
// their steady-state values).
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
        exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
        (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
        (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
        (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
        exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // Analytic solution of the CaSR/buffer binding quadratic
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Same quadratic treatment for cytosolic calcium buffering
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
            exp(-0.04391*svolt))*(svolt+37.78)/
            (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));   // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
        0.1/(1.+exp((Cai-0.0005)/0.0001))+
        0.20/(1.+exp((Cai-0.00075)/0.0008))+
        0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential update toward steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates only relax when not increasing during depolarization
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (explicit Euler); rDY_ carries the NEW state values
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One time step for an epicardium cell; same convention as the myocardium
// wrapper (rDY holds the updated state).
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium kernel (fitted parameter set loaded below).
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real
sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.6680147002871,0.000430530939075394,0.000127852575589828,0.000356808091092550,0.263201677596404,0.118395256371924,0.182181143961952,5.07547129145478,0.0155941993462387,1.84656256314423,1088.39960685796,0.000350028084311740,0.555667945632962,0.00905447310372078,0.00477222574684051,6.97994762709882e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real 
IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); 
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = 
FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
sparse_block_matrix_diagonal.h
// g2o - General Graph Optimization // Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED // TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef G2O_SPARSE_BLOCK_MATRIX_DIAGONAL_H #define G2O_SPARSE_BLOCK_MATRIX_DIAGONAL_H #include <vector> #include <Eigen/Core> #include <Eigen/StdVector> #include "g2o/config.h" #include "matrix_operations.h" namespace g2o { /** * \brief Sparse matrix which uses blocks on the diagonal * * This class is used as a const view on a SparseBlockMatrix * which allows a faster iteration over the elements of the * matrix. */ template <class MatrixType> class SparseBlockMatrixDiagonal { public: //! this is the type of the elementary block, it is an Eigen::Matrix. 
typedef MatrixType SparseMatrixBlock; //! columns of the matrix int cols() const { return _blockIndices.size() ? _blockIndices.back() : 0; } //! rows of the matrix int rows() const { return _blockIndices.size() ? _blockIndices.back() : 0; } typedef std::vector<MatrixType, Eigen::aligned_allocator<MatrixType>> DiagonalVector; SparseBlockMatrixDiagonal(const std::vector<int> &blockIndices) : _blockIndices(blockIndices) {} //! how many rows/cols does the block at block-row / block-column r has? inline int dimOfBlock(int r) const { return r ? _blockIndices[r] - _blockIndices[r - 1] : _blockIndices[0]; } //! where does the row /col at block-row / block-column r starts? inline int baseOfBlock(int r) const { return r ? _blockIndices[r - 1] : 0; } //! the block matrices per block-column const DiagonalVector &diagonal() const { return _diagonal; } DiagonalVector &diagonal() { return _diagonal; } //! indices of the row blocks const std::vector<int> &blockIndices() const { return _blockIndices; } void multiply(double *&dest, const double *src) const { int destSize = cols(); if (!dest) { dest = new double[destSize]; memset(dest, 0, destSize * sizeof(double)); } // map the memory by Eigen Eigen::Map<Eigen::VectorXd> destVec(dest, destSize); Eigen::Map<const Eigen::VectorXd> srcVec(src, rows()); #ifdef G2O_OPENMP #pragma omp parallel for default(shared) schedule(dynamic, 10) #endif for (int i = 0; i < static_cast<int>(_diagonal.size()); ++i) { int destOffset = baseOfBlock(i); int srcOffset = destOffset; const SparseMatrixBlock &A = _diagonal[i]; // destVec += *A.transpose() * srcVec (according to the sub-vector // parts) internal::axpy(A, srcVec, srcOffset, destVec, destOffset); } } protected: const std::vector<int> &_blockIndices; ///< vector of the indices of the ///< blocks along the diagonal DiagonalVector _diagonal; }; } // namespace g2o #endif
micro-app-aos-openmp.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <getopt.h> #include <lua.h> #include <lauxlib.h> #include <lualib.h> #define NPOINTS 10000 #define NEDGES 10000 struct edge { int v0; int v1; float data; float v0_pt_data[3]; float v1_pt_data[3]; }; struct edge edges[NEDGES]; float pt_data[NPOINTS][3]; float edge_data[NEDGES]; void print_help() { printf("Usage: \n"); printf("\t --help print this message and exit \n"); printf("\t --type Type of graph, must be one of:\n"); printf("\t\t\t pure_random \n"); printf("\t\t\t regular_random \n"); printf("\t\t\t contiguous \n"); printf("\t --nloops Number of repetitions, must be \n"); printf("\t at least one. \n"); printf("\t --file File from which to read graph \n"); } double timer() { struct timeval tp; struct timezone tzp; long i; i = gettimeofday(&tp, &tzp); return ((double)tp.tv_sec) + ((double) tp.tv_usec) * 1e-6; } int graph_init(char* graph_type, char* fname) { lua_State *L; int i, j, k, v; L = luaL_newstate(); luaL_openlibs(L); luaL_loadfile(L, "graph.lua"); lua_pcall(L, 0, 0, 0); lua_getglobal(L, "create_graph"); lua_pushstring(L, graph_type); lua_pushinteger(L, NPOINTS); lua_pushinteger(L, NEDGES); if (fname != NULL) { lua_pushstring(L, fname); lua_call(L, 4, 1); } else { lua_call(L, 3, 1); } /* Table is now sitting at the top of the stack */ i = 0; lua_pushnil(L); /* Make sure lua_next starts at beginning */ while (lua_next(L, -2) != 0) { // fetch first key k = lua_tointeger(L, -2); lua_pushnil(L); while (lua_next(L, -2) != 0) { // loop over neighbors lua_pop(L,1); v = lua_tointeger(L, -1); // build edges array here edges[i].v0 = k - 1; edges[i].v1 = v - 1; for (j = 0; j < 3; j++) { edges[i].v0_pt_data[j] = 0; edges[i].v1_pt_data[j] = 0; } i++; } lua_pop(L,1); } lua_close(L); if (i == 0) { return -1; } else { return 0; } } int data_init() { int i; for (i = 0; i < NPOINTS; i++) { pt_data[i][0] = 1; pt_data[i][1] = 1; pt_data[i][2] = 1; } return 0; } int edge_data_init() { int i; for (i = 0; i < 
NEDGES; i++) { edge_data[i] = 1; } return 0; } int edge_gather() { int i,j; int v0; int v1; #pragma omp parallel for \ private(i, j, v0, v1) for (i = 0; i < NEDGES; i++) { v0 = edges[i].v0; v1 = edges[i].v1; edges[i].v0_pt_data[0] = pt_data[v0][0]; edges[i].v0_pt_data[1] = pt_data[v0][1]; edges[i].v0_pt_data[2] = pt_data[v0][2]; edges[i].v1_pt_data[0] = pt_data[v1][0]; edges[i].v1_pt_data[1] = pt_data[v1][1]; edges[i].v1_pt_data[2] = pt_data[v1][2]; edges[i].data = edge_data[i]; } return 0; } int edge_compute() { int i, j; float v0_p0, v0_p1, v0_p2; float v1_p0, v1_p1, v1_p2; float x0, x1, x2; float e_data; #pragma omp parallel for \ private(i, j, v0_p0, v0_p1, v0_p2) \ private(v1_p0, v1_p1, v1_p2) \ private(x0, x1, x2, e_data) for (i = 0; i < NEDGES; i++) { v0_p0 = edges[i].v0_pt_data[0]; v0_p1 = edges[i].v0_pt_data[1]; v0_p2 = edges[i].v0_pt_data[2]; v1_p0 = edges[i].v1_pt_data[0]; v1_p1 = edges[i].v1_pt_data[1]; v1_p2 = edges[i].v1_pt_data[2]; e_data = edges[i].data; x0 = (v0_p0 + v1_p0) * e_data; x1 = (v0_p1 + v1_p1) * e_data; x2 = (v0_p2 + v1_p2) * e_data; edges[i].v0_pt_data[0] = x0; edges[i].v0_pt_data[1] = x1; edges[i].v0_pt_data[2] = x2; edges[i].v1_pt_data[0] = x0; edges[i].v1_pt_data[1] = x1; edges[i].v1_pt_data[2] = x2; } return 0; } int edge_scatter() { int i; int v0; int v1; #pragma omp parallel for \ private(i, v0, v1) for (i = 0; i < NEDGES; i++) { v0 = edges[i].v0; v1 = edges[i].v1; #pragma omp atomic pt_data[v0][0] += edges[i].v0_pt_data[0]; #pragma omp atomic pt_data[v0][1] += edges[i].v0_pt_data[1]; #pragma omp atomic pt_data[v0][2] += edges[i].v0_pt_data[2]; #pragma omp atomic pt_data[v1][0] += edges[i].v1_pt_data[0]; #pragma omp atomic pt_data[v1][1] += edges[i].v1_pt_data[1]; #pragma omp atomic pt_data[v1][2] += edges[i].v1_pt_data[2]; } return 0; } int main(int argc, char** argv) { int i; int rv; double time0, time1; int c, opt_i; int nloops = 0; char* gt = ""; char* fname = ""; static struct option long_opts[] = { {"help", no_argument, 0, 
0}, {"type", required_argument, 0, 0}, {"nloops", required_argument, 0, 0}, {"file", required_argument, 0, 0} }; /* Parse command-line arguments */ while (1) { c = getopt_long(argc, argv, "", long_opts, &opt_i); if (c == -1) { break; } if (c == 0) { switch (opt_i) { case 0: print_help(); exit(0); case 1: gt = optarg; break; case 2: nloops = atoi(optarg); break; case 3: fname = optarg; break; } } else { print_help(); exit(0); } } /* check for errors */ if (gt == NULL || nloops < 1) { print_help(); exit(0); } // initialize data structures rv = graph_init(gt, fname); if (rv < 0) { printf("Error creating graph. \n"); exit(0); } data_init(); edge_data_init(); // loop time0 = timer(); for (i = 0; i < nloops; i++) { edge_gather(); edge_compute(); edge_scatter(); } time1 = timer(); // print results for (i = 0; i < 10; i++) { printf("%i : %f %f %f \n", i, pt_data[i][0], pt_data[i][1], pt_data[i][2]); } printf("Time: %f s \n", (time1 - time0) / ((float) nloops)); return 0; }
cycle_share.c
// SPDX-License-Identifier: BSD-2-Clause /* Copyright 1998-2018,2021 Bernard Parent Copyright 2020 Minindu Weerakoon Copyright 2001 Giovanni Fusina Copyright 2002 Thomas E. Schwartzentruber Copyright 2021 Prasanna Thoguluva Rajendran Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <cycle/share/cycle_share.h> #include <src/data.h> #include <src/common.h> #include <src/bdry.h> #include <src/init.h> #include <src/post.h> #include <cycle/ts/_ts.h> #include <cycle/tsemf/_tsemf.h> #include <cycle/_cycle.h> #include <cycle/res/_res.h> #include <cycle/resconv/_resconv.h> #include <cycle/restime/_restime.h> #include <model/fluid/_fluid.h> #include <model/emfield/_emfield.h> #include <model/metrics/_metrics.h> #include <model/fluid/_fluid.h> #ifdef OPENMPTHREADS #define maxloopthread LONG_MAX #define maxzonethread LONG_MAX #else #define maxloopthread 256 #define maxzonethread 256 #endif #define MAXRATIO_DTAUMAX_DTAUMIN 100.0 typedef struct { np_t *np; gl_t *gl; long theta,ls,le; } segment_t; typedef struct { np_t *np; gl_t *gl; long theta,ls,le; void (*funct)(np_t *, gl_t *, long, long, long); } segmentarg_t; typedef struct { np_t *np; gl_t *gl; zone_t zone; void (*funct)(np_t *, gl_t *, zone_t); } threadzone_t; void *segmentfunct(void *segmentarg){ (((segmentarg_t *) segmentarg)->funct)( ((segmentarg_t *) segmentarg)->np, ((segmentarg_t *) segmentarg)->gl, ((segmentarg_t *) segmentarg)->theta, ((segmentarg_t *) segmentarg)->ls, ((segmentarg_t *) segmentarg)->le); return(NULL); } void find_musclvarscycle(np_t np, gl_t *gl, musclvarscycle_t musclvars){ find_musclvars(np,gl,musclvars); #ifdef _RESTIME_STORAGE_TRAPEZOIDAL_MUSCLVARS long flux; // for (flux=0; flux<nf; flux++) musclvars[nf+flux]=musclvars[flux]; for (flux=0; flux<nf; flux++) musclvars[nf+flux]=np.bs->trapezoidalm1[flux]; #endif } static void execute_function_on_all_segments(segmentarg_t *segmentarg, long numsegment, int SEGMENTWORK){ if ( #if !defined(POSIXTHREADS) && !defined(OPENMPTHREADS) TRUE #else (SEGMENTWORK==SEGMENTWORK_LIGHT && segmentarg[0].gl->NOSHORTTHREADS) #endif ){ long cnt; for (cnt=0; cnt<numsegment; cnt++){ segmentarg[cnt].funct(segmentarg[cnt].np,segmentarg[cnt].gl,segmentarg[cnt].theta,segmentarg[cnt].ls,segmentarg[cnt].le); } } else { #ifdef POSIXTHREADS 
long cnt; void *retval; pthread_t *pthread; pthread=(pthread_t *)malloc((numsegment+3)*sizeof(pthread_t)); for (cnt=0; cnt<numsegment; cnt++){ if (pthread_create(&((pthread)[cnt]), NULL, segmentfunct, (void *)(&(segmentarg[cnt])))) fatal_error("Cannot create thread."); } for (cnt=0; cnt<numsegment; cnt++){ if (pthread_join(pthread[cnt],&retval)) fatal_error("Cannot join thread %ld.",cnt); } free(pthread); #endif #ifdef OPENMPTHREADS long cnt; #pragma omp parallel for private(cnt) schedule(dynamic) for (cnt=0; cnt<numsegment; cnt++){ segmentarg[cnt].funct(segmentarg[cnt].np,segmentarg[cnt].gl,segmentarg[cnt].theta,segmentarg[cnt].ls,segmentarg[cnt].le); } #endif } } static void create_segments(np_t *np, gl_t *gl, long theta, long ls, long le, void funct(np_t *, gl_t *, long, long, long), segmentarg_t *segmentarg, long *cntsegment, bool COUNTFLAG, int TYPELEVEL, bool is_node_valid_local(np_t, int)){ long l,lm1,ls_local,le_local; bool INSIDE; l=ls; ls_local=ls; /* only needed to avoid compiler warning */ INSIDE=FALSE; do { lm1=l; l=_l_plus_one(l,gl,theta); if ((!INSIDE) && (is_node_valid_local(np[l],TYPELEVEL))) { ls_local=lm1; INSIDE=TRUE; } if ((INSIDE) && ((!is_node_valid_local(np[l],TYPELEVEL)) || (l==le))){ le_local=l; if (!COUNTFLAG) { segmentarg[*cntsegment].np=np; segmentarg[*cntsegment].gl=gl; segmentarg[*cntsegment].theta=theta; segmentarg[*cntsegment].ls=_l_plus_one(ls_local,gl,theta); segmentarg[*cntsegment].le=_l_minus_one(le_local,gl,theta); segmentarg[*cntsegment].funct=funct; } (*cntsegment)++; INSIDE=FALSE; } } while (l!=le); if (INSIDE) fatal_error("Problem setting up segments."); } void sweep_with_1D_segments(np_t *np, gl_t *gl, zone_t zone, void funct(np_t *, gl_t *, long, long, long), int sweeptype, int TYPELEVEL, bool is_node_valid_local(np_t, int), int SEGMENTWORK, int GRIDLEVEL){ long j,k,cntsegment,numthread; ifn1D( long i; ) segmentarg_t *segmentarg; int cnt; bool COUNTFLAG; numthread=0; assert(is_zone_in_zone(zone,gl->domain_all)); 
segmentarg=(segmentarg_t *)malloc(sizeof(segmentarg_t)); /* do this loop twice: the first time just to count.. */ for (cnt=0; cnt<2; cnt++){ if (cnt==0) COUNTFLAG=TRUE; else COUNTFLAG=FALSE; if (!COUNTFLAG) segmentarg=(segmentarg_t *)realloc(segmentarg,numthread*sizeof(segmentarg_t)); /* the first dimension loop */ if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_I) { cntsegment=0; for_2DL(j,zone.js,zone.je){ if (mod(j-gl->domain_all.js,GRIDLEVEL)==0){ for_3DL(k,zone.ks,zone.ke){ if (mod(k-gl->domain_all.ks,GRIDLEVEL)==0){ create_segments(np,gl,0,_ai(gl,zone.is-1,j,k),_ai(gl,zone.ie+1,j,k), funct, segmentarg,&cntsegment, (bool)COUNTFLAG, TYPELEVEL,is_node_valid_local); if (cntsegment>=maxloopthread) { numthread=max(numthread,cntsegment); if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); cntsegment=0; } } } } } if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); numthread=max(numthread,cntsegment); } /* the second dimension loop */ #ifdef _2DL if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_J) { cntsegment=0; for_1DL(i,zone.is,zone.ie){ if (mod(i-gl->domain_all.is,GRIDLEVEL)==0){ for_3DL(k,zone.ks,zone.ke){ if (mod(k-gl->domain_all.ks,GRIDLEVEL)==0){ create_segments(np,gl,1,_ai(gl,i,zone.js-1,k),_ai(gl,i,zone.je+1,k), funct, segmentarg,&cntsegment,(bool)COUNTFLAG, TYPELEVEL,is_node_valid_local); if (cntsegment>=maxloopthread) { numthread=max(numthread,cntsegment); if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); cntsegment=0; } } } } } if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); numthread=max(numthread,cntsegment); } #endif /* the third dimension loop */ #ifdef _3DL if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_K) { cntsegment=0; for_1DL(i,zone.is,zone.ie){ if (mod(i-gl->domain_all.is,GRIDLEVEL)==0){ for_2DL(j,zone.js,zone.je){ if (mod(j-gl->domain_all.js,GRIDLEVEL)==0){ 
create_segments(np,gl,2,_ai(gl,i,j,zone.ks-1),_ai(gl,i,j,zone.ke+1), funct, segmentarg, &cntsegment,(bool)COUNTFLAG, TYPELEVEL,is_node_valid_local); if (cntsegment>=maxloopthread) { numthread=max(numthread,cntsegment); if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); cntsegment=0; } } } } } if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); numthread=max(numthread,cntsegment); } #endif } free(segmentarg); } /* the following first sets the offset to 0, then 1, then -1 */ static long _node_offset_from_cnt(long cnt){ long offset; offset=0; if (cnt==0) offset=0; if (cnt==1) offset=1; if (cnt==2) offset=-1; return(offset); } void update_bdry_node(np_t *np, gl_t *gl, long l){ long dim,dimsgn,l_C,l_B,l_A,l_D; bool BDRYDIRECFOUND; #ifdef _2DL long offset1,offset2,cnt1,cnt2; #endif #ifdef _3D long offset3,cnt3; #endif bool UPDATED; assert(is_node_bdry(np[l],TYPELEVEL_FLUID_WORK)); UPDATED=FALSE; BDRYDIRECFOUND=find_bdry_direc(np, gl, l, TYPELEVEL_FLUID_WORK, &dim, &dimsgn); if (is_node_link(np[l],TYPELEVEL_FLUID_WORK)) { // in case the boundary node is a link, U has already been updated: simply update the prim variables find_prim_fluid(np, l, gl); UPDATED=TRUE; } if (BDRYDIRECFOUND && !UPDATED){ l_A=l; l_B=_al(gl,l,dim,dimsgn); l_C=_al(gl,l,dim,dimsgn*2); if (is_node_inner(np[_al(gl,l,dim,dimsgn*3)],TYPELEVEL_FLUID_WORK)) l_D=_al(gl,l,dim,dimsgn*3); else l_D=l_C; assert(is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK)); assert(is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK)); update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } /* now, do the corners */ if (!UPDATED) { #ifdef _2D for (cnt1=0; cnt1<=2; cnt1++){ for (cnt2=0; cnt2<=2; cnt2++){ offset1=_node_offset_from_cnt(cnt1); offset2=_node_offset_from_cnt(cnt2); l_C=_all(gl,l,0,offset1*2,1,offset2*2); l_B=_all(gl,l,0,offset1,1,offset2); l_A=l; l_D=l_C; if ( 
is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK) && is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK) && !UPDATED){ update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } } } #endif #ifdef _3D for (cnt1=0; cnt1<=2; cnt1++){ for (cnt2=0; cnt2<=2; cnt2++){ for (cnt3=0; cnt3<=2; cnt3++){ offset1=_node_offset_from_cnt(cnt1); offset2=_node_offset_from_cnt(cnt2); offset3=_node_offset_from_cnt(cnt3); l_C=_al(gl, _al(gl, _al(gl,l,0,offset1*2), 1,offset2*2), 2,offset3*2); l_B=_al(gl, _al(gl, _al(gl,l,0,offset1), 1,offset2), 2,offset3); l_A=l; l_D=l_C; if ( is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK) && is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK) && !UPDATED){ update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } } } } #endif } if (!UPDATED) { fatal_error("Problem updating boundary node in update_bdry_node() function."); } } void update_bdry_nodes_on_segment(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_bdry(np[l],TYPELEVEL_FLUID_WORK)){ thread_lock_node_set(np,l,THREADTYPE_ZONE); update_bdry_node(np, gl, l); thread_lock_node_unset(np,l,THREADTYPE_ZONE); } } } void update_bdry_nodes(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np, gl, zone, &update_bdry_nodes_on_segment, SWEEPTYPE_I, TYPELEVEL_FLUID_WORK,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); } #ifdef DISTMPI #define numfluidvars (nf+1+max(0,hbw_resconv_fluid-1)*nmc) #define numlinkvars ((hbw_resconv_fluid-1)*nmc) #define DOUBLE_INT_MAX 100000000000000 typedef double sendvars_t[max(nfe,numfluidvars)]; typedef struct { sendvars_t vars; int proc; long l; bool SENT; } sendnode_t; void update_linked_nodes_2(np_t *np, gl_t *gl, int TYPELEVEL){ int rankrecv,numproc,ranksend,thisrank; long i,j,k; zone_t zonesend,zonerecv,zone; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, 
&numproc); /* here we need to mpi the linkmusclvars */ for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { // if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) printf("x"); if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)){ assert(np[_ai(gl,i,j,k)].numlinkmusclvars!=0); assert(np[_ai(gl,i,j,k)].linkmusclvars!=NULL); MPI_Send(&np[_ai(gl,i,j,k)].numlinkmusclvars,1,MPI_INT,rankrecv,0,MPI_COMM_WORLD); MPI_Send(np[_ai(gl,i,j,k)].linkmusclvars,numlinkvars,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } } if (rankrecv==thisrank) { if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)){ MPI_Recv(&np[_ai(gl,i,j,k)].numlinkmusclvars,1,MPI_INT,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); assert(np[_ai(gl,i,j,k)].linkmusclvars!=NULL); MPI_Recv(np[_ai(gl,i,j,k)].linkmusclvars,numlinkvars,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); } } } } } } } MPI_Barrier(MPI_COMM_WORLD); } void update_linked_nodes(np_t *np, gl_t *gl, int TYPELEVEL){ long i,j,k,l1,l2,flux,offset,l,cntlink; MPI_Status MPI_Status1; musclvarscycle_t musclvars; sendvars_t mpivars; int thisrank,numproc,rank2,rank1,thisproc; int packsize,buffersize,bbuffersize; double *buffer,*bbuffer; sendnode_t *sendnode; long numsendvars,numvars,numsend,cntsend,cnt; double *sendvars; int *recvproc; int cntproc; zone_t zone; zone=gl->domain; switch (TYPELEVEL){ case TYPELEVEL_FLUID: numvars=numfluidvars; break; case TYPELEVEL_FLUID_WORK: numvars=numfluidvars; break; #ifdef EMFIELD case TYPELEVEL_EMFIELD: numvars=nfe; break; #endif default: 
fatal_error("TYPELEVEL can not be set to %d.\n",TYPELEVEL); numvars=0; } sendnode=(sendnode_t *)malloc(sizeof(sendnode_t)); sendvars=(double *)malloc(sizeof(double)); cntsend=0; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Pack_size( 1, MPI_DOUBLE, MPI_COMM_WORLD, &packsize ); recvproc=(int *)malloc((numproc+2)*sizeof(int)); buffersize = min(INT_MAX,nmc*(zone.ie-zone.is)*(zone.je-zone.js)if3DL(*(zone.ke-zone.ks)) * (MPI_BSEND_OVERHEAD + packsize)); buffer = (double *)malloc( buffersize ); MPI_Buffer_attach( buffer, buffersize ); for_ijk(zone,is,js,ks,ie,je,ke){ np[_ai(gl,i,j,k)].numlinkmusclvars=0; } /* first send the packets */ cntsend=0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL)){ #ifdef _CYCLE_MULTIZONE fatal_error("Linked nodes can not be used with Multizone cycle yet. Need to update update_linked_nodes() function."); #endif #ifdef _CYCLE_MULTIZONE_MARCHING fatal_error("Linked nodes can not be used with MultizoneMarching cycle yet. 
Need to update update_linked_nodes() function."); #endif if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL)){ for (cntlink=0; cntlink<_num_node_link(np[_ai(gl,i,j,k)],TYPELEVEL); cntlink++){ l1=_ai_all(gl,i,j,k); l2=_node_link(np[_ai(gl,i,j,k)],cntlink,TYPELEVEL); rank1=_node_rank(gl, i, j, k); rank2=_node_rank(gl, _i_all(l2,gl,0), _i_all(l2,gl,1), _i_all(l2,gl,2)); if (rank1==thisrank) { if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){ for (flux=0; flux<nf; flux++) mpivars[flux]=np[_l_from_l_all(gl,l1)].bs->U[flux]; mpivars[nf]=(double)_nodes_between_link_and_bdry_limited(np, gl, _l_from_l_all(gl,l1), l2, TYPELEVEL, max(0,hbw_resconv_fluid-1)); for (offset=1; offset<hbw_resconv_fluid; offset++) { // find_prim_fluid(np, _al_link(np, gl, _l_from_l_all(gl,l1), offset, TYPELEVEL), gl); find_musclvarscycle(np[_al_link(np, gl, _l_from_l_all(gl,l1), l2, offset, TYPELEVEL)], gl, musclvars); for (flux=0; flux<nmc; flux++) mpivars[1+flux+nf+(offset-1)*nmc]=musclvars[flux]; } if (rank1!=rank2){ for (flux=0; flux<numvars; flux++) sendnode[cntsend].vars[flux]=mpivars[flux]; sendnode[cntsend].proc=(int)rank2; sendnode[cntsend].l=l2; sendnode[cntsend].SENT=FALSE; cntsend++; sendnode=(sendnode_t *)realloc(sendnode,(cntsend+1)*sizeof(sendnode_t)); } else { /* no need to send with MPI*/ //printf("\n --(%ld,%ld,%ld) %d",i,j,k,thisrank); l=_l_from_l_all(gl,l2); for (flux=0; flux<nf; flux++) np[l].bs->U[flux]=mpivars[flux]; assert(np[l].linkmusclvars!=NULL); assert(is_node_bdry(np[l],TYPELEVEL)); assert(is_node_link(np[l],TYPELEVEL)); np[l].numlinkmusclvars=(short)round(mpivars[nf]); for (offset=1; offset<hbw_resconv_fluid; offset++) { for (flux=0; flux<nmc; flux++) np[l].linkmusclvars[flux+(offset-1)*nmc]=mpivars[1+flux+nf+(offset-1)*nmc]; } } } #ifdef EMFIELD if (TYPELEVEL==TYPELEVEL_EMFIELD){ for (flux=0; flux<numvars; flux++) mpivars[flux]=np[_l_from_l_all(gl,l1)].bs->Uemfield[flux]; if (rank1!=rank2) { for (flux=0; flux<numvars; flux++) 
sendnode[cntsend].vars[flux]=mpivars[flux];
          /* queue this node for an MPI send: record destination rank and global node index */
          sendnode[cntsend].proc=(int)rank2;
          sendnode[cntsend].l=l2;
          sendnode[cntsend].SENT=FALSE;
          cntsend++;
          /* grow the send list by one slot for the next node to be queued */
          sendnode=(sendnode_t *)realloc(sendnode,(cntsend+1)*sizeof(sendnode_t));
        } else {
          /* no need to send with MPI: source and destination live on this rank,
             copy the emfield conserved variables directly */
          for (flux=0; flux<nfe; flux++) np[_l_from_l_all(gl,l2)].bs->Uemfield[flux]=mpivars[flux];
        }
      }
#endif
          }
        }
      }
    }
  }
  numsend=cntsend;
  /* send nodes in block, one destination proc at a time: each pass of the do-loop
     picks the first not-yet-sent destination, packs every queued node bound for it
     into sendvars, and ships count + data with buffered sends */
  do {
    thisproc=-1;
    numsendvars=0;
    for (cntsend=0; cntsend<numsend; cntsend++){
      if (thisproc==-1 && !sendnode[cntsend].SENT) thisproc=sendnode[cntsend].proc;
      if (sendnode[cntsend].proc==thisproc){
        assert(!sendnode[cntsend].SENT);
        /* grow by 2*numvars: numvars payload values plus (in debug builds) one
           extra slot for the node-index checksum appended below */
        sendvars=(double *)realloc(sendvars,(numsendvars+2*numvars)*sizeof(double));
        for (flux=0; flux<numvars; flux++) sendvars[numsendvars+flux]=sendnode[cntsend].vars[flux];
        numsendvars+=numvars;
#ifndef NDEBUG
        /* debug builds interleave the (folded) global node index so the receiver
           can verify node ordering */
        sendvars[numsendvars]=(double)mod(sendnode[cntsend].l,DOUBLE_INT_MAX);
        numsendvars++;
#endif
        sendnode[cntsend].SENT=TRUE;
      }
    }
    if (thisproc!=-1){
      if (MPI_Bsend(&numsendvars,1,MPI_LONG,thisproc,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("Problem with MPI_Bsend in update_linked_nodes().");
      if (MPI_Bsend(sendvars,numsendvars,MPI_DOUBLE,thisproc,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("Problem with MPI_Bsend in update_linked_nodes().");
    }
  } while (thisproc!=-1);
  /* build the list of ranks this process must receive from (-1 terminates the list) */
  for (cnt=0; cnt<(numproc+2); cnt++){
    recvproc[cnt]=-1;
  }
  for_ijk(zone,is,js,ks,ie,je,ke){
    if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)){
      l1=_node_link(np[_ai(gl,i,j,k)],0,TYPELEVEL);
      rank2=_node_rank(gl, i, j, k);
      rank1=_node_rank(gl, _i_all(l1,gl,0), _i_all(l1,gl,1), _i_all(l1,gl,2));
      if (rank1!=rank2 && rank2==thisrank){
        /* rank1 is one process that we will need to get data from; store it in recvproc */
        cntproc=0;
        while(recvproc[cntproc]!=-1 && recvproc[cntproc]!=rank1 ) {
          cntproc++;
        }
        assert(cntproc<numproc);
        recvproc[cntproc]=rank1;
      }
    }
  }
  /* receive from each source rank in turn and scatter the payload back onto
     the local linked boundary nodes, in the same traversal order used by the
     sender so the packed values line up */
  cntproc=0;
  while (recvproc[cntproc]!=-1) {
    thisproc=recvproc[cntproc];
    MPI_Recv(&numsendvars,1,MPI_LONG,thisproc,0,MPI_COMM_WORLD,&MPI_Status1);
    sendvars=(double *)realloc(sendvars,numsendvars*sizeof(double));
    MPI_Recv(sendvars,numsendvars,MPI_DOUBLE,thisproc,0,MPI_COMM_WORLD,&MPI_Status1);
    cntsend=0;
    for_ijk(zone,is,js,ks,ie,je,ke){
      if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)){
        l2=_ai_all(gl,i,j,k);
        assert(is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL));
        l1=_node_link(np[_ai(gl,i,j,k)],0,TYPELEVEL);
        rank2=_node_rank(gl, i, j, k);
        rank1=_node_rank(gl, _i_all(l1,gl,0), _i_all(l1,gl,1), _i_all(l1,gl,2));
        if (rank1!=rank2 && rank2==thisrank){
          if (thisproc==rank1){
            for (flux=0; flux<numvars; flux++) mpivars[flux]=sendvars[cntsend+flux];
            cntsend+=numvars;
#ifndef NDEBUG
            /* verify the interleaved node-index checksum written by the sender */
            assert(mod(l2,DOUBLE_INT_MAX)==(long)sendvars[cntsend]);
            cntsend++;
#endif
            l=_l_from_l_all(gl,l2);
            assert(is_node_bdry(np[l],TYPELEVEL));
            assert(is_node_link(np[l],TYPELEVEL));
            if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){
              /* payload layout for fluid levels: nf conserved vars, then the
                 muscl-var count, then (hbw_resconv_fluid-1) strips of nmc muscl vars */
              for (flux=0; flux<nf; flux++) np[l].bs->U[flux]=mpivars[flux];
              assert(np[l].linkmusclvars!=NULL);
              np[l].numlinkmusclvars=(short)round(mpivars[nf]);
              for (offset=1; offset<hbw_resconv_fluid; offset++) {
                for (flux=0; flux<nmc; flux++) np[l].linkmusclvars[flux+(offset-1)*nmc]=mpivars[1+flux+nf+(offset-1)*nmc];
              }
            }
#ifdef EMFIELD
            if (TYPELEVEL==TYPELEVEL_EMFIELD){
              for (flux=0; flux<nfe; flux++) np[l].bs->Uemfield[flux]=mpivars[flux];
            }
#endif
          }
        }
      }
    }
    cntproc++;
  }
  /* NOTE(review): the detach uses bbuffer but the free below releases `buffer`
     (both declared above this chunk) — confirm these refer to the same attached
     MPI buffer */
  MPI_Buffer_detach( &bbuffer, &bbuffersize );
  free(buffer);
  MPI_Barrier(MPI_COMM_WORLD);
  free(sendnode);
  free(recvproc);
  free(sendvars);
  update_linked_nodes_2(np, gl, TYPELEVEL);
}

#else//DISTMPI

/* Single-process version: copy the state of each linked boundary node directly
   from the node it is linked to, for the fluid (and optionally emfield) level. */
void update_linked_nodes(np_t *np, gl_t *gl, int TYPELEVEL){
  long i,j,k,l1,l2,flux;
  for_ijk(gl->domain,is,js,ks,ie,je,ke){
    l1=_ai(gl,i,j,k);
    if (is_node_bdry(np[l1],TYPELEVEL) && is_node_link(np[l1],TYPELEVEL)){
#ifdef _CYCLE_MULTIZONE
      fatal_error("Linked nodes can not be used with Multizone cycle yet. Need to update update_linked_nodes() function.");
#endif
#ifdef _CYCLE_MULTIZONE_MARCHING
      fatal_error("Linked nodes can not be used with MultizoneMarching cycle yet. Need to update update_linked_nodes() function.");
#endif
      assert(is_node_bdry(np[l1],TYPELEVEL));
      l2=_node_link(np[l1],0,TYPELEVEL);
      if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){
        for (flux=0; flux<nf; flux++) np[l1].bs->U[flux]=np[l2].bs->U[flux];
      }
#ifdef EMFIELD
      if (TYPELEVEL==TYPELEVEL_EMFIELD){
        for (flux=0; flux<nfe; flux++) np[l1].bs->Uemfield[flux]=np[l2].bs->Uemfield[flux];
      }
#endif
    }
  }
}

#endif//DISTMPI

/* Thin predicate wrapper: returns TRUE when user-supplied FUNCT selects node (i,j,k). */
static bool is_node_in_region(bool(*FUNCT)(gl_t *, long, long, long), gl_t *gl, long i, long j, long k){
  bool tmp;
  tmp=FUNCT(gl,i,j,k);
  return(tmp);
}

/* Returns TRUE when any node within a bounding box of half-width hbw_bdry_fluid
   around (i,j,k) satisfies FUNCT; used to include the halo of nodes needed to
   update boundary nodes of the selected region. */
static bool is_node_in_region_extended_by_bb(bool(*FUNCT)(gl_t *, long, long, long), gl_t *gl, long i, long j, long k){
  bool tmp;
  long cnti,cntj,cntk;
  tmp=FALSE;
  for_1DL(cnti,i-hbw_bdry_fluid,i+hbw_bdry_fluid){
    for_2DL(cntj,j-hbw_bdry_fluid,j+hbw_bdry_fluid){
      for_3DL(cntk,k-hbw_bdry_fluid,k+hbw_bdry_fluid){
        if (FUNCT(gl,cnti,cntj,cntk)) tmp=TRUE;
      }
    }
  }
  return(tmp);
}

/* Resume every valid node selected by FUNCT (plus its halo), rebuild their
   working variables, then suspend all other nodes. */
void resume_nodes_specified_in_function(np_t *np, gl_t *gl, bool(*FUNCT)(gl_t *, long, long, long)){
  long i,j,k;
  long *noderes;   /* indices of all nodes resumed (region + halo) */
  long *bdryres;   /* indices of resumed nodes inside the region proper */
  long numnoderes,numbdryres,cnt;
  copy_base_to_work_node_type(np,gl,gl->domain_lim);
  noderes=(long *)malloc((gl->domain.ie-gl->domain.is+4)if2DL(*(gl->domain.je-gl->domain.js+4)) if3DL(*(gl->domain.ke-gl->domain.ks+4))*sizeof(long));
  bdryres=(long *)malloc((gl->domain.ie-gl->domain.is+4)if2DL(*(gl->domain.je-gl->domain.js+4)) if3DL(*(gl->domain.ke-gl->domain.ks+4))*sizeof(long));
  numnoderes=0;
  numbdryres=0;
  for_ijk(gl->domain,is-1,js-1,ks-1,ie+1,je+1,ke+1){
    if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_region_extended_by_bb(FUNCT,gl,i,j,k)) {
      if (resume_node(&(np[_ai(gl,i,j,k)])) ) {
        if (is_node_in_region(FUNCT,gl,i,j,k) ){
          bdryres[numbdryres]=_ai(gl,i,j,k);
          numbdryres++;
        }
        /* node freshly resumed: remember it so its working variables can be rebuilt below */
        noderes[numnoderes]=_ai(gl,i,j,k);
        numnoderes++;
      }
    } else {
      suspend_node(&(np[_ai(gl,i,j,k)]));
    }
  }
  /* rebuild the working variables of the inner nodes of the nodes resumed*/
  for (cnt=0; cnt<numnoderes; cnt++){
    if (is_node_resumed(np[noderes[cnt]]) && is_node_inner(np[noderes[cnt]],TYPELEVEL_FLUID)){
      find_prim_fluid(np,noderes[cnt],gl);
    }
  }
  /* rebuild the working variables of the boundary nodes of the nodes resumed*/
  for (cnt=0; cnt<numbdryres; cnt++){
    if (is_node_resumed(np[bdryres[cnt]]) && is_node_bdry(np[bdryres[cnt]],TYPELEVEL_FLUID)) {
      update_bdry_node(np,gl,bdryres[cnt]);
    }
  }
  /* suspend all nodes needed only to compute the boundary nodes. this is necessary
     to ensure that all non-suspended nodes are properly updated.*/
  for_ijk(gl->domain,is-1,js-1,ks-1,ie+1,je+1,ke+1){
    if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_region(FUNCT,gl,i,j,k)))
      suspend_node(&(np[_ai(gl,i,j,k)]));
  }
  free(noderes);
  free(bdryres);
}

/* Resume only the valid nodes within `zone` (plus a halo of hbw_bdry_fluid),
   rebuild inner nodes' working variables (in parallel when OPENMPTHREADS),
   update boundary nodes, then suspend the halo nodes again. */
void resume_nodes_only_in_zone_and_update_bdry_nodes(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  long *noderes;   /* all resumed nodes (zone + halo) */
  long *bdryres;   /* resumed nodes strictly inside zone */
  long numnoderes,numbdryres,cnt;
  copy_base_to_work_node_type(np,gl,gl->domain_lim);
  noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1)
                 if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1))
                 if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long));
  bdryres=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1)
                 if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1))
                 if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long));
  numnoderes=0;
  numbdryres=0;
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)
        && (i>=zone.is-hbw_bdry_fluid) && (i<=zone.ie+hbw_bdry_fluid)
        if2DL(&& (j>=zone.js-hbw_bdry_fluid) && (j<=zone.je+hbw_bdry_fluid))
        if3DL(&& (k>=zone.ks-hbw_bdry_fluid) && (k<=zone.ke+hbw_bdry_fluid))) {
      if (resume_node(&(np[_ai(gl,i,j,k)])) ) {
        if (is_node_in_zone(i,j,k,zone)){
          bdryres[numbdryres]=_ai(gl,i,j,k);
          numbdryres++;
        }
        noderes[numnoderes]=_ai(gl,i,j,k);
        numnoderes++;
      }
    } else {
      suspend_node(&(np[_ai(gl,i,j,k)]));
    }
  }
  /* rebuild the working variables of the inner nodes of the nodes resumed*/
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
  for (cnt=0; cnt<numnoderes; cnt++){
    if (is_node_resumed(np[noderes[cnt]]) && is_node_inner(np[noderes[cnt]],TYPELEVEL_FLUID)){
      find_prim_fluid(np,noderes[cnt],gl);
    }
  }
  free(noderes);
  /* rebuild the working variables of the boundary nodes of the nodes resumed*/
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
  for (cnt=0; cnt<numbdryres; cnt++){
    if (is_node_resumed(np[bdryres[cnt]]) && is_node_bdry(np[bdryres[cnt]],TYPELEVEL_FLUID)) {
      find_ijk_from_l(gl, bdryres[cnt], &i, &j, &k);
      /* boundary nodes outside the computational domain proper only get their
         primitive variables rebuilt, not a full boundary-condition update */
      if (is_node_in_zone(i, j, k, gl->domain)){
        update_bdry_node(np,gl,bdryres[cnt]);
      } else {
        find_prim_fluid(np,bdryres[cnt],gl);
      }
    }
  }
  free(bdryres);
  /* suspend all nodes needed only to compute the boundary nodes. this is necessary
     to ensure that all non-suspended nodes are properly updated.*/
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_zone(i,j,k,zone)))
      suspend_node(&(np[_ai(gl,i,j,k)]));
  }
}

/* Resume the valid nodes inside `zone` (clipped to domain_lim) and rebuild
   their working variables; does not suspend any node. */
void resume_nodes_in_zone(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  long *noderes;
  long numnoderes,cnt;
  zone_t zoneint;
  copy_base_to_work_node_type(np,gl,gl->domain_lim);
  noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1)
                 if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1))
                 if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long));
  numnoderes=0;
  zoneint=_zone_intersection(gl->domain_lim,zone);
  for_ijk(zoneint,is,js,ks,ie,je,ke){
    if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)) {
      if (resume_node(&(np[_ai(gl,i,j,k)])) ) {
        noderes[numnoderes]=_ai(gl,i,j,k);
        numnoderes++;
      }
    }
  }
  /* rebuild the working variables of the inner nodes of the nodes resumed*/
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
  for (cnt=0; cnt<numnoderes; cnt++){
    if (is_node_resumed(np[noderes[cnt]]) && is_node_valid(np[noderes[cnt]],TYPELEVEL_FLUID)){
      find_prim_fluid(np,noderes[cnt],gl);
    }
  }
  free(noderes);
}

/* Resume only the valid nodes within `zone` (no halo), rebuild their working
   variables, and suspend every node outside the zone. */
void resume_nodes_only_in_zone(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  long *noderes;
  long numnoderes,cnt;
  copy_base_to_work_node_type(np,gl,gl->domain_lim);
  noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1)
                 if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1))
                 if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long));
  numnoderes=0;
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)
        && (i>=zone.is) && (i<=zone.ie)
        if2DL(&& (j>=zone.js) && (j<=zone.je))
        if3DL(&& (k>=zone.ks) && (k<=zone.ke))) {
      if (resume_node(&(np[_ai(gl,i,j,k)])) ) {
        noderes[numnoderes]=_ai(gl,i,j,k);
        numnoderes++;
      }
    } else {
      suspend_node(&(np[_ai(gl,i,j,k)]));
    }
  }
  /* rebuild the working variables of the inner nodes of the nodes resumed*/
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
  for (cnt=0; cnt<numnoderes; cnt++){
    if (is_node_resumed(np[noderes[cnt]]) && is_node_valid(np[noderes[cnt]],TYPELEVEL_FLUID)){
      find_prim_fluid(np,noderes[cnt],gl);
    }
  }
  free(noderes);
  /* suspend all nodes needed only to compute the boundary nodes.
     this is necessary to ensure that all non-suspended nodes are properly updated.*/
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_zone(i,j,k,zone)))
      suspend_node(&(np[_ai(gl,i,j,k)]));
  }
}

#ifdef UNSTEADY
/* Advance the physical time level: push U into the Um1/Um2/Um3 history
   (depending on the backward-time-stencil width) and roll the trapezoidal
   storage, for every valid node. Resets the iteration counter. */
void increase_time_level(np_t *np, gl_t *gl){
  long i,j,k,flux,l;
  gl->time+=gl->dt;
  gl->iter=0;
  add_double_to_codex(&(gl->cycle.codex),"time",gl->time);
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    l=_ai(gl,i,j,k);
    if ((is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID))){
      for (flux=0; flux<nf; flux++){
#if _RESTIME_BW > 3
        np[l].bs->Um3[flux]=np[l].bs->Um2[flux];
#endif
#if _RESTIME_BW > 2
        np[l].bs->Um2[flux]=np[l].bs->Um1[flux];
#endif
        np[l].bs->Um1[flux]=np[l].bs->U[flux];
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL_RESIDUAL
        np[l].bs->trapezoidalm1[flux]=np[l].bs->trapezoidalm1_next[flux];
#endif
      }
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL_MUSCLVARS
      find_musclvars(np[l],gl,np[l].bs->trapezoidalm1);
#endif
    }
#ifdef EMFIELD
    if ((is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD))){
      for (flux=0; flux<nfe; flux++){
        np[l].bs->Uemfieldm1[flux]=np[l].bs->Uemfield[flux];
      }
    }
#endif
  }
}
#endif//UNSTEADY

/* SOAP action dispatcher for the runtime (in-cycle) module: handles
   WriteDataFile(), Init(), Model(), Disc(), Cycle(), Post(), then defers to
   the cycle-specific actions. The output filename is saved and restored
   around WriteDataFile() so a one-shot name does not stick. */
void runtime_actions(char *actionname, char **argum, SOAP_codex_t *codex){
  char *oldfilename;
  oldfilename=(char *)malloc(sizeof(char)*(5+strlen((((readcontrolarg_t *)codex->action_args)->gl->output_filename))));
  strcpy(oldfilename,(((readcontrolarg_t *)codex->action_args)->gl->output_filename));
  if (strcmp(actionname,"WriteDataFile")==0) {
    if (SOAP_number_argums(*argum)==1){
      SOAP_substitute_all_argums(argum, codex);
      SOAP_get_argum_string(codex,&(((readcontrolarg_t *)codex->action_args)->gl->output_filename),*argum,0);
    }
    if (SOAP_number_argums(*argum)>1){
      SOAP_fatal_error(codex,"Action WriteDataFile() can not be called with more than 1 argument. Either it is called with one argument (a string containing the data file name) or with no argument. If no argument is given, the default data file name as specified on the command line will be used.");
    }
    write_data_file(*((readcontrolarg_t *)codex->action_args)->np, ((readcontrolarg_t *)codex->action_args)->gl);
    codex->ACTIONPROCESSED=TRUE;
  }
  strcpy((((readcontrolarg_t *)codex->action_args)->gl->output_filename),oldfilename);
  free(oldfilename);
  if (strcmp(actionname,"Init")==0) {
    read_init(*argum, codex);
    codex->action=&runtime_actions;
    /* re-initialization invalidates the residual history */
    ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
    ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
    codex->ACTIONPROCESSED=TRUE;
  }
  if (strcmp(actionname,"Model")==0) {
    read_model(*argum, codex);
    codex->action=&runtime_actions;
    ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
    ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
    codex->ACTIONPROCESSED=TRUE;
  }
  if (strcmp(actionname,"Disc")==0) {
    read_disc(*argum, codex);
    codex->action=&runtime_actions;
    ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
    ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
    codex->ACTIONPROCESSED=TRUE;
  }
  if (strcmp(actionname,"Cycle")==0) {
    read_cycle(*argum, codex);
    codex->action=&runtime_actions;
    codex->ACTIONPROCESSED=TRUE;
  }
  if (strcmp(actionname,"Post")==0) {
    read_post(*argum, codex);
    codex->action=&runtime_actions;
    codex->ACTIONPROCESSED=TRUE;
  }
  runtime_actions_cycle_specific(actionname,argum,codex);
}

/* Write the skeleton of the Cycle() section to a new control file. */
void write_cycle_template(FILE **controlfile){
  wfprintf(*controlfile,
  "\n\n"
  "Cycle(\n"
  );
  write_cycle_fluid_template(controlfile);
#ifdef EMFIELD
  write_cycle_emfield_template(controlfile);
#endif
  write_runtime_template(controlfile);
  wfprintf(*controlfile,
  ");\n"
  );
}

/* SOAP action handler while reading Cycle(): captures the runtime module's
   code verbatim for later execution, then defers to the fluid/emfield readers. */
void read_cycle_actions(char *actionname, char **argum, SOAP_codex_t *codex){
  gl_t *gl;
  gl=((readcontrolarg_t *)codex->action_args)->gl;
  if (strcmp(actionname,_CYCLE_ACTIONNAME)==0 &&
      !gl->CONTROL_READ) {
    if (((readcontrolarg_t *)codex->action_args)->VERBOSE) wfprintf(stdout,"%s..",_CYCLE_ACTIONNAME);
    ((readcontrolarg_t *)codex->action_args)->
       gl->cycle.code_runtime=(char *)malloc((strlen(*argum)+2)*sizeof(char));
    strcpy(((readcontrolarg_t *)codex->action_args)->gl->cycle.code_runtime,*argum);
    ((readcontrolarg_t *)codex->action_args)->gl->cycle.RUNTIMEMODULEFOUND=TRUE;
    codex->ACTIONPROCESSED=TRUE;
  }
  read_cycle_fluid_actions(actionname, argum, codex);
  read_cycle_emfield_actions(actionname, argum, codex);
}

/* Parse the Cycle() section of the control file and verify that the fluid,
   emfield, and runtime modules were all present before initializing the cycle. */
void read_cycle(char *argum, SOAP_codex_t *codexcontrol){
  gl_t *gl;
  gl=((readcontrolarg_t *)codexcontrol->action_args)->gl;
  if (!gl->CONTROL_READ){
    gl->cycle.RUNTIMEMODULEFOUND=FALSE;
  }
  codexcontrol->action=&read_cycle_actions;
  SOAP_process_code(argum, codexcontrol, SOAP_VARS_KEEP_ALL);
  if (!gl->CONTROL_READ){
    if (!gl->CYCLE_FLUID_READ) fatal_error("The fluid module %s() was not found within Cycle().",_FLUID_ACTIONNAME);
    if (!gl->CYCLE_EMFIELD_READ) fatal_error("The emfield module %s() was not found within Cycle().",_EMFIELD_ACTIONNAME);
    if (!gl->cycle.RUNTIMEMODULEFOUND) fatal_error("The module %s() was not found within Cycle().",_CYCLE_ACTIONNAME);
    init_cycle(argum,codexcontrol);
  }
}

/* Write the skeleton of the Disc() section to a new control file. */
void write_disc_template(FILE **controlfile){
  wfprintf(*controlfile,
  "\n\n"
  "Disc(\n"
  );
  write_disc_fluid_template(controlfile);
#ifdef EMFIELD
  write_disc_emfield_template(controlfile);
#endif
  write_disc_resconv_template(controlfile);
  write_disc_restime_template(controlfile);
  wfprintf(*controlfile,
  ");\n"
  );
}

/* SOAP action handler while reading Disc(): delegates to the per-module readers. */
void read_disc_actions(char *actionname, char **argum, SOAP_codex_t *codex){
//  gl_t *gl;
//  gl=((readcontrolarg_t *)codex->action_args)->gl;
  read_disc_fluid_actions(actionname, argum, codex);
  read_disc_emfield_actions(actionname, argum, codex);
  read_disc_resconv_actions(actionname, argum, codex);
  read_disc_restime_actions(actionname, argum, codex);
}

/* Parse the Disc() section and verify all four discretization modules were present. */
void read_disc(char *argum, SOAP_codex_t *codexcontrol){
  gl_t *gl;
  gl=((readcontrolarg_t *)codexcontrol->action_args)->gl;
  codexcontrol->action=&read_disc_actions;
  gl->DISC_FLUID_READ=FALSE;
  gl->DISC_EMFIELD_READ=FALSE;
  gl->DISC_RESCONV_READ=FALSE;
  gl->DISC_RESTIME_READ=FALSE;
  SOAP_process_code(argum, codexcontrol, SOAP_VARS_KEEP_ALL);
  if (!gl->CONTROL_READ){
    if (!gl->DISC_FLUID_READ) fatal_error("The fluid module %s() was not found within Disc().",_FLUID_ACTIONNAME);
    if (!gl->DISC_EMFIELD_READ) fatal_error("The emfield module %s() was not found within Disc().",_EMFIELD_ACTIONNAME);
    if (!gl->DISC_RESCONV_READ) fatal_error("The residual convection module %s() was not found within Disc().",_RESCONV_ACTIONNAME);
    if (!gl->DISC_RESTIME_READ) fatal_error("The residual time module %s() was not found within Disc().",_RESTIME_ACTIONNAME);
  }
}

#ifdef DISTMPI
/* not used anymore */
/* Concatenate every rank's clipped-variable list string into one string on
   all ranks, by broadcasting each rank's list in turn. */
void MPI_Allreduce_Sum_Cliplist(char **cliplist_str){
  int rank,numproc,proc,thiscliplist_len;
  char *cliplistmem_str,*thiscliplist_str;
  cliplistmem_str=(char *)malloc((strlen(*cliplist_str)+10)*sizeof(char));
  strcpy(cliplistmem_str,*cliplist_str);
  thiscliplist_str=(char *)malloc(sizeof(char));
  strcpy(*cliplist_str,"");
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  for (proc=0; proc<numproc; proc++){
    if (proc==rank) {
      thiscliplist_len=strlen(cliplistmem_str);
      thiscliplist_str=(char *)realloc(thiscliplist_str,sizeof(char)*(thiscliplist_len+1));
      strcpy(thiscliplist_str,cliplistmem_str);
    }
    MPI_Bcast(&thiscliplist_len,1,MPI_INT,proc,MPI_COMM_WORLD);
    thiscliplist_str=(char *)realloc(thiscliplist_str,sizeof(char)*(thiscliplist_len+1));
    MPI_Bcast(thiscliplist_str,thiscliplist_len+1,MPI_CHAR,proc,MPI_COMM_WORLD);
    *cliplist_str=(char *)realloc(*cliplist_str,sizeof(char)*(strlen(*cliplist_str)+thiscliplist_len+1));
    strcat(*cliplist_str,thiscliplist_str);
  }
  free(cliplistmem_str);
  free(thiscliplist_str);
}

/* Gather every rank's clipped-variable names and counts into the global
   clipped-variable record, by broadcasting each rank's entries in turn. */
void find_clipped_variables_all(gl_t *gl){
  int rank,numproc,proc,cnt;
  int thisclipnamenum,thisclipname_len;
  char *thisclipname;
  long thisclipnum;
  reset_clipped_variables_all(gl);
  thisclipname=(char *)malloc(sizeof(char));
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  /* every rank broadcasts, in turn, how many clipped-variable entries it holds,
     then each entry's name and count; all ranks accumulate the union */
  for (proc=0; proc<numproc; proc++){
    if (proc==rank) {
      thisclipnamenum=gl->model.clipnamenum;
    }
    MPI_Bcast(&thisclipnamenum,1,MPI_INT,proc,MPI_COMM_WORLD);
    for (cnt=0; cnt<thisclipnamenum; cnt++){
      if (proc==rank) {
        thisclipname_len=strlen(gl->model.clipname[cnt]);
      }
      MPI_Bcast(&thisclipname_len,1,MPI_INT,proc,MPI_COMM_WORLD);
      thisclipname=(char *)realloc(thisclipname,sizeof(char)*(thisclipname_len+1));
      if (proc==rank) {
        strcpy(thisclipname,gl->model.clipname[cnt]);
        thisclipnum=gl->model.clipnum[cnt];
      }
      MPI_Bcast(thisclipname,thisclipname_len+1,MPI_CHAR,proc,MPI_COMM_WORLD);
      MPI_Bcast(&thisclipnum,1,MPI_LONG,proc,MPI_COMM_WORLD);
      add_to_clipped_variables_all(gl, thisclipname, thisclipnum);
//      if (rank==0) printf("\n_%s(%ld)%d_",thisclipname,thisclipnum,proc);
    }
  }
  free(thisclipname);
}
#endif

/* Publish the clipping info and the maximum residual change (ximax, with its
   i/j/k location) as SOAP codex variables; under DISTMPI the global maximum
   and its owning rank's location are found first via MPI_MAXLOC. */
void update_runtime_codex_xi_from_gl(gl_t *gl, SOAP_codex_t *codex){
  char *cliplist_str;
#ifdef DISTMPI
  int rank,proc;
  long ijk_ximax;
  /* value+rank pairs as required by the MPI_DOUBLE_INT / MPI_MAXLOC reduction */
  struct {
    double ximax;
    int rank;
  } ximaxrank,ximaxrank_max;
#ifdef EMFIELD
  long ijk_ximax_emfield;
  struct {
    double ximax;
    int rank;
  } ximaxrank_emfield,ximaxrank_max_emfield;
#endif
#endif//DISTMPI
#ifdef DISTMPI
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &proc);
  /* only rank 0 writes to screen */
  if (rank!=0) codex->SCREENOUTPUT=FALSE;
#endif
  cliplist_str=(char *)malloc(sizeof(char));
#ifdef DISTMPI
  find_clipped_variables_all(gl);
  find_clipped_variables_list_all(gl,&cliplist_str);
  add_string_to_codex(codex,"clipinfo",cliplist_str);
  find_clipped_muscl_variables_list_all(gl,&cliplist_str);
  add_string_to_codex(codex,"clipinfo_muscl",cliplist_str);
  find_clipped_bdry_variables_list_all(gl,&cliplist_str);
  add_string_to_codex(codex,"clipinfo_bdry",cliplist_str);
#else
  find_clipped_variables_list(gl,&cliplist_str);
  add_string_to_codex(codex,"clipinfo",cliplist_str);
  find_clipped_muscl_variables_list(gl,&cliplist_str);
  add_string_to_codex(codex,"clipinfo_muscl",cliplist_str);
  find_clipped_bdry_variables_list(gl,&cliplist_str);
  add_string_to_codex(codex,"clipinfo_bdry",cliplist_str);
//  MPI_Allreduce_Sum_Cliplist(&cliplist_str);
#endif
  free(cliplist_str);
#ifdef DISTMPI
  ximaxrank.ximax=gl->ximax;
  ximaxrank.rank=rank;
  /* find the global maximum ximax and which rank owns it */
  MPI_Allreduce(&ximaxrank, &ximaxrank_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);
  add_double_to_codex(codex,"ximax",ximaxrank_max.ximax);
  ijk_ximax=gl->i_ximax;
  /* the owning rank broadcasts the location of its maximum */
  MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
  add_int_to_codex(codex,"i_ximax",ijk_ximax);
#ifdef EMFIELD
  ximaxrank_emfield.ximax=gl->ximax_emfield;
  ximaxrank_emfield.rank=rank;
  MPI_Allreduce(&ximaxrank_emfield, &ximaxrank_max_emfield, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);
  add_double_to_codex(codex,"ximax_emfield",ximaxrank_max_emfield.ximax);
  ijk_ximax_emfield=gl->i_ximax_emfield;
  MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
  add_int_to_codex(codex,"i_ximax_emfield",ijk_ximax_emfield);
#endif
#ifdef _2DL
  ijk_ximax=gl->j_ximax;
  MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
  add_int_to_codex(codex,"j_ximax",ijk_ximax);
#ifdef EMFIELD
  ijk_ximax_emfield=gl->j_ximax_emfield;
  MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
  add_int_to_codex(codex,"j_ximax_emfield",ijk_ximax_emfield);
#endif
#endif//_2DL
#ifdef _3DL
  ijk_ximax=gl->k_ximax;
  MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
  add_int_to_codex(codex,"k_ximax",ijk_ximax);
#ifdef EMFIELD
  ijk_ximax_emfield=gl->k_ximax_emfield;
  MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
  add_int_to_codex(codex,"k_ximax_emfield",ijk_ximax_emfield);
#endif
#endif//_3DL
#else//DISTMPI
  add_double_to_codex(codex,"ximax",gl->ximax);
  add_int_to_codex(codex,"i_ximax",gl->i_ximax);
#ifdef EMFIELD
  add_double_to_codex(codex,"ximax_emfield",gl->ximax_emfield);
  add_int_to_codex(codex,"i_ximax_emfield",gl->i_ximax_emfield);
#endif
#ifdef _2DL
  add_int_to_codex(codex,"j_ximax",gl->j_ximax);
#ifdef EMFIELD
  add_int_to_codex(codex,"j_ximax_emfield",gl->j_ximax_emfield);
#endif
#endif//_2DL
#ifdef _3DL
  add_int_to_codex(codex,"k_ximax",gl->k_ximax);
#ifdef EMFIELD
  add_int_to_codex(codex,"k_ximax_emfield",gl->k_ximax_emfield);
#endif
#endif//_3DL
#endif//DISTMPI
}

/* Publish the remaining runtime variables (iter, CFL, dt, xiverge, effective
   iteration counts, etc.) to the SOAP codex; under DISTMPI the effective
   iteration counts are summed over all ranks. */
void update_runtime_codex_vars_except_xi_from_gl(gl_t *gl, SOAP_codex_t *codex){
#ifdef DISTMPI
  double effiter_U_sum,effiter_R_sum;
  int rank,proc;
#ifdef EMFIELD
  double effiter_U_sum_emfield,effiter_R_sum_emfield;
#endif
#endif//DISTMPI
#ifdef DISTMPI
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &proc);
  if (rank!=0) codex->SCREENOUTPUT=FALSE;
#endif
  add_int_to_codex(codex,"iter", gl->iter);
  add_double_to_codex(codex,"xiverge",gl->cycle.fluid.xiverge);
  add_string_to_codex(codex,"outputfilename", gl->output_filename);
#ifdef EMFIELD
  add_double_to_codex(codex,"xiverge_emfield",gl->cycle.emfield.xiverge);
#endif
#if defined(UNSTEADY)
  add_double_to_codex(codex,"time",gl->time);
#endif
  add_double_to_codex(codex,"CFL",gl->CFL);
#ifdef UNSTEADY
  add_double_to_codex(codex,"dt",gl->dt);
#endif
#ifdef _CYCLE_MULTIZONE_MARCHING
  add_double_to_codex(codex,"window.is",gl->window.is);
  add_double_to_codex(codex,"window.ie",gl->window.ie);
  add_int_to_codex(&(gl->cycle.codex), "numzones_updated", 0);
  add_int_to_codex(&(gl->cycle.codex), "numzones_total", 0);
#endif
#ifdef _CYCLE_MULTIZONE
  add_int_to_codex(&(gl->cycle.codex), "numzones_updated", 0);
  add_int_to_codex(&(gl->cycle.codex), "numzones_total", 0);
#endif
#ifdef DISTMPI
  /* sum the per-rank effective iteration counts over all processes */
  MPI_Allreduce(&gl->effiter_U, &effiter_U_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  add_double_to_codex(codex,"effiter_U",effiter_U_sum);
  MPI_Allreduce(&gl->effiter_R, &effiter_R_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  add_double_to_codex(codex,"effiter_R",effiter_R_sum);
#ifdef EMFIELD
  MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_sum_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  add_double_to_codex(codex,"effiter_U_emfield",effiter_U_sum_emfield);
  MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_sum_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  add_double_to_codex(codex,"effiter_R_emfield",effiter_R_sum_emfield);
#endif
#else//DISTMPI
  add_double_to_codex(codex,"effiter_U",gl->effiter_U);
  add_double_to_codex(codex,"effiter_R",gl->effiter_R);
#ifdef EMFIELD
  add_double_to_codex(codex,"Lc",gl->Lc);
//  add_double_to_codex(codex,"relaxEMF",gl->relaxEMF);
  add_double_to_codex(codex,"effiter_U_emfield",gl->effiter_U_emfield);
  add_double_to_codex(codex,"effiter_R_emfield",gl->effiter_R_emfield);
#endif
#endif//DISTMPI
}

/* Expose the TSEMF_* and PRECON_* integer constants as SOAP variables so
   control-file code can refer to them by name. */
void add_constants_to_codex(gl_t *gl, SOAP_codex_t *codex){
  char str[100];
  sprintf(str, "%d", TSEMF_DEFAULT);
  SOAP_add_to_vars(codex,"TSEMF_DEFAULT",str);
  sprintf(str, "%d", TSEMF_ADI);
  SOAP_add_to_vars(codex,"TSEMF_ADI",str);
  sprintf(str, "%d", TSEMF_DDADI);
  SOAP_add_to_vars(codex,"TSEMF_DDADI",str);
  sprintf(str, "%d", TSEMF_IMAF);
  SOAP_add_to_vars(codex,"TSEMF_IMAF",str);
  sprintf(str, "%d", TSEMF_ADIIMAF);
  SOAP_add_to_vars(codex,"TSEMF_ADIIMAF",str);
  sprintf(str, "%d", TSEMF_NEWTON);
  SOAP_add_to_vars(codex,"TSEMF_NEWTON",str);
  sprintf(str, "%d", TSEMF_ADIi);
  SOAP_add_to_vars(codex,"TSEMF_ADIi",str);
  sprintf(str, "%d", TSEMF_ADIk);
  SOAP_add_to_vars(codex,"TSEMF_ADIk",str);
  sprintf(str, "%d", TSEMF_IMAFk);
  SOAP_add_to_vars(codex,"TSEMF_IMAFk",str);
  sprintf(str, "%d", TSEMF_IMAFi);
  SOAP_add_to_vars(codex,"TSEMF_IMAFi",str);
  sprintf(str, "%d", TSEMF_SOR);
  SOAP_add_to_vars(codex,"TSEMF_SOR",str);
  sprintf(str, "%d", TSEMF_SOR2);
  SOAP_add_to_vars(codex,"TSEMF_SOR2",str);
  sprintf(str, "%d", PRECON_CONSTANTTIMESTEP);
  SOAP_add_to_vars(codex,"PRECON_CONSTANTTIMESTEP",str);
  sprintf(str, "%d", PRECON_LOCALTIMESTEP);
  SOAP_add_to_vars(codex,"PRECON_LOCALTIMESTEP",str);
  sprintf(str, "%d", PRECON_LOCALTIMESTEP2);
  SOAP_add_to_vars(codex,"PRECON_LOCALTIMESTEP2",str);
  sprintf(str, "%d", PRECON_LOCALEIGENVALUE);
  SOAP_add_to_vars(codex,"PRECON_LOCALEIGENVALUE",str);
  sprintf(str, "%d", PRECON_LOCALEIGENVALUE2);
  SOAP_add_to_vars(codex,"PRECON_LOCALEIGENVALUE2",str);
}

/* Execute the runtime module code captured from Cycle(): set up the action
   arguments, publish the runtime variables, run the code through SOAP, then
   read back CFL / dt / ximax and (optionally) restore the pre-run variables. */
void process_code_runtime(np_t *np, gl_t *gl, char *code_runtime, SOAP_codex_t *codex){
  char *code;
  SOAP_vars_t *varsmem;
  readcontrolarg_t Runtimearg;
  /* snapshot the codex variables so changes made by the runtime module can be
     rolled back below */
  varsmem=(SOAP_vars_t *)malloc(sizeof(SOAP_vars_t));
  SOAP_copy_all_vars(codex->vars, &varsmem);
  Runtimearg.np=&np;
  Runtimearg.gl=gl;
  Runtimearg.input=(input_t *)malloc(sizeof(input_t));
  Runtimearg.input->READDATAFILE=FALSE;
  Runtimearg.TYPELEVEL=TYPELEVEL_FLUID;
  Runtimearg.module_level=0;
  Runtimearg.POSTMODULE=FALSE;
  Runtimearg.CYCLEMODULE=FALSE;
  Runtimearg.RESETITERCOUNT=FALSE;
  Runtimearg.VERBOSE=FALSE;
  Runtimearg.gl_post=*gl;
  Runtimearg.domain_post=gl->domain;
  Runtimearg.np_post=np;
  if (!gl->cycle.RUNTIMEMODULEFOUND)
    fatal_error("The %s() module was not found within Cycle().",_CYCLE_ACTIONNAME);
  code=(char *)malloc((strlen(code_runtime)+2)*sizeof(char));
  strcpy(code,code_runtime);
  codex->ACTION=TRUE;
  codex->action=&runtime_actions;
  codex->action_args=(void *)&Runtimearg;
  ((readcontrolarg_t *)codex->action_args)->np=&np;
  ((readcontrolarg_t *)codex->action_args)->gl=gl;
/*  if (codex->action_being_processed==NULL){
    codex->action_being_processed=(char *)malloc((strlen(_CYCLE_ACTIONNAME)+2)*sizeof(char));
    strcpy(codex->action_being_processed,_CYCLE_ACTIONNAME);
  }*/
  codex->VERBOSE=FALSE;
  codex->SCREENOUTPUT=TRUE;
  add_constants_to_codex(gl, codex);
  update_runtime_codex_xi_from_gl(gl, codex);
  update_runtime_codex_vars_except_xi_from_gl(gl,codex);
  SOAP_process_code(code, codex, SOAP_VARS_KEEP_ALL);
  gl->CFL=SOAP_var_value(codex,"CFL");
#ifdef UNSTEADY
  gl->dt=SOAP_var_value(codex,"dt");
#endif
  gl->ximax=SOAP_var_value(codex,"ximax");
  assert(gl->CFL>=0.0e0);
  /* here, make sure that all changes to vars within runtime module are erased,
     because those will not be written to datafile -> CFL and ximax and dt are
     exception to this, and this is why they are probed through SOAP_var_value above */
  if (gl->RESETRUNTIMEVARS){
    SOAP_free_all_vars(codex->vars);
    SOAP_copy_all_vars(varsmem,&(codex->vars));
  }
  free(Runtimearg.input);
  SOAP_free_all_vars(varsmem);
  free(varsmem);
  free(code);
  reset_clipped_variables(gl);
}

/* Scan all inner nodes of `zone` for the maximum residual change xi, storing
   it in gl->ximax and optionally (IJK_UPDATE_YES) its i/j/k location.
   Aborts on NaN or absurdly negative xi. */
void find_ximax(np_t *np, gl_t *gl, zone_t zone, int IJK_UPDATE){
  long i,j,k;
  double xi;
  gl->ximax=0.0e0;
  for_ijk(zone,is,js,ks,ie,je,ke){
    if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) {
      assert(is_node_resumed(np[_ai(gl,i,j,k)]));
      xi=np[_ai(gl,i,j,k)].wk->xi;
      if (xi<-1.0e99 || isnan(xi)) {
        fatal_error("problem with xi (xi=%E) at i=%ld, j=%ld, k=%ld.",xi,i,j,k);
      }
      if (xi>=gl->ximax) {
        gl->ximax=xi;
        if (IJK_UPDATE==IJK_UPDATE_YES) {
          gl->i_ximax=i;
          gl->j_ximax=j;
          gl->k_ximax=k;
        }
      }
    }
  }
}

/*
static void PrintZones(zone_t *zones, long numzone){
  long cnt;
  for (cnt=0; cnt<numzone; cnt++){
    printf("%ld is=%ld js=%ld ie=%ld je=%ld\n",cnt,zones[cnt].is,zones[cnt].js,
           zones[cnt].ie,zones[cnt].je);
  }
  printf("\n");
}
*/

/* For each pair of zones that overlap along exactly one axis (matching on the
   others), split the overlap at its midpoint so the zones become disjoint;
   done separately for the i, j (2D+), and k (3D) directions. */
static void rearrange_overlapping_zones(zone_t *zones, long numzone){
  long cnt1,cnt2;
  /* PrintZone(zones,numzones); */
  for (cnt1=0; cnt1<numzone; cnt1++){
    for (cnt2=0; cnt2<numzone; cnt2++){
      if (cnt2!=cnt1){
        /* do overlap along i : make ie of zones[cnt1] smaller and is of zones[cnt2] bigger */
        if (if3DL( zones[cnt1].ks==zones[cnt2].ks && )
            if2DL( zones[cnt1].js==zones[cnt2].js && )
            if3DL( zones[cnt1].ke==zones[cnt2].ke && )
            if2DL( zones[cnt1].je==zones[cnt2].je && )
            zones[cnt1].ie< zones[cnt2].ie && zones[cnt1].ie>=zones[cnt2].is) {
          zones[cnt1].ie=(zones[cnt1].ie+zones[cnt2].is)/2;
          zones[cnt2].is=zones[cnt1].ie+1;
          if ( zones[cnt1].is>zones[cnt1].ie || zones[cnt2].is>zones[cnt2].ie )
            fatal_error("Problem modifying zones along i.");
        }
      }
    }
  }
#ifdef _2DL
  for (cnt1=0; cnt1<numzone; cnt1++){
    for (cnt2=0; cnt2<numzone; cnt2++){
      if (cnt2!=cnt1){
        /* do overlap along j : make je of zones[cnt1] smaller and js of zones[cnt2] bigger*/
        if (if3DL( zones[cnt1].ks==zones[cnt2].ks && )
            zones[cnt1].is==zones[cnt2].is &&
            if3DL( zones[cnt1].ke==zones[cnt2].ke && )
            zones[cnt1].ie==zones[cnt2].ie &&
            zones[cnt1].je< zones[cnt2].je && zones[cnt1].je>=zones[cnt2].js) {
          zones[cnt1].je=(zones[cnt1].je+zones[cnt2].js)/2;
          zones[cnt2].js=zones[cnt1].je+1;
          if ( zones[cnt1].js>zones[cnt1].je || zones[cnt2].js>zones[cnt2].je )
            fatal_error("Problem modifying zones along j.");
        }
      }
    }
  }
#endif
#ifdef _3DL
  for (cnt1=0; cnt1<numzone; cnt1++){
    for (cnt2=0; cnt2<numzone; cnt2++){
      if (cnt2!=cnt1){
        /* do overlap along k : make je of zones[cnt1] smaller and js of zones[cnt2] bigger*/
        if (zones[cnt1].is==zones[cnt2].is &&
            zones[cnt1].js==zones[cnt2].js &&
            zones[cnt1].ie==zones[cnt2].ie &&
            zones[cnt1].je==zones[cnt2].je &&
            zones[cnt1].ke< zones[cnt2].ke && zones[cnt1].ke>=zones[cnt2].ks) {
          zones[cnt1].ke=(zones[cnt1].ke+zones[cnt2].ks)/2;
          zones[cnt2].ks=zones[cnt1].ke+1;
          if ( zones[cnt1].ks>zones[cnt1].ke || zones[cnt2].ks>zones[cnt2].ke )
            fatal_error("Problem modifying zones along k.");
        }
      }
    }
  }
#endif
  /* PrintZone(zone,numzone); */
}

/* setup multizone situated inside zone */
/* Split `zone` into subzones of length `zonelength`, keep those whose maximum
   xi exceeds xiverge (or all of them when UPDATE_ALL_ZONES), and derive for
   each kept subzone the boundary (bdry) and residual (res) zones padded by
   hbw_bdry_fluid / hbw_res_fluid, clipped to `lim`. The res zones are then
   made non-overlapping. */
void setup_multizone(np_t *np, gl_t *gl, zone_t zone, zone_t lim, double xiverge,
                     long zonelength, bool UPDATE_ALL_ZONES, multizone_t *multizone){
  long cnt;
  long numsubzones;
  zone_t *subzones;
  double ximax;
  long i,j,k;
  /* find the zones for the ts process */
  subzones=(zone_t *)malloc(sizeof(zone_t));
  find_subzones_in_zone_given_zonelength(zonelength, zone, &numsubzones, &subzones);
  /* find out which zones need to be updated */
  multizone->numzones_ts=0;
  multizone->ts=(zone_t *)malloc(numsubzones*sizeof(zone_t));
  for (cnt=0; cnt<numsubzones; cnt++){
    ximax=0.0e0;
    for_ijk(subzones[cnt],is,js,ks,ie,je,ke){
      if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) {
        ximax=max(ximax,np[_ai(gl,i,j,k)].wk->xi);
      }
    }
    if (ximax>xiverge || UPDATE_ALL_ZONES) {
      multizone->ts[multizone->numzones_ts]=subzones[cnt];
      (multizone->numzones_ts)++;
    }
  }
  /* setup res and bdry, limited by lim_is,lim_js, etc*/
  multizone->bdry=(zone_t *)malloc(multizone->numzones_ts*sizeof(zone_t));
  multizone->res=(zone_t *)malloc(multizone->numzones_ts*sizeof(zone_t));
  for (cnt=0; cnt<multizone->numzones_ts; cnt++){
    multizone->bdry[cnt].is=max(lim.is,multizone->ts[cnt].is-hbw_bdry_fluid);
    multizone->bdry[cnt].ie=min(lim.ie,multizone->ts[cnt].ie+hbw_bdry_fluid);
#ifdef _2DL
    multizone->bdry[cnt].js=max(lim.js,multizone->ts[cnt].js-hbw_bdry_fluid);
    multizone->bdry[cnt].je=min(lim.je,multizone->ts[cnt].je+hbw_bdry_fluid);
#endif
#ifdef _3DL
    multizone->bdry[cnt].ks=max(lim.ks,multizone->ts[cnt].ks-hbw_bdry_fluid);
    multizone->bdry[cnt].ke=min(lim.ke,multizone->ts[cnt].ke+hbw_bdry_fluid);
#endif
    multizone->res[cnt].is=max(lim.is,multizone->ts[cnt].is-hbw_bdry_fluid-hbw_res_fluid);
    multizone->res[cnt].ie=min(lim.ie,multizone->ts[cnt].ie+hbw_bdry_fluid+hbw_res_fluid);
#ifdef _2DL
    multizone->res[cnt].js=max(lim.js,multizone->ts[cnt].js-hbw_bdry_fluid-hbw_res_fluid);
    multizone->res[cnt].je=min(lim.je,multizone->ts[cnt].je+hbw_bdry_fluid+hbw_res_fluid);
#endif
#ifdef _3DL
    multizone->res[cnt].ks=max(lim.ks,multizone->ts[cnt].ks-hbw_bdry_fluid-hbw_res_fluid);
    multizone->res[cnt].ke=min(lim.ke,multizone->ts[cnt].ke+hbw_bdry_fluid+hbw_res_fluid);
#endif
  }
  multizone->numzones_total=numsubzones;
  multizone->numzones_res=multizone->numzones_ts;
  multizone->numzones_bdry=multizone->numzones_ts;
  free(subzones);
  rearrange_overlapping_zones(multizone->res,multizone->numzones_res);
}

/* pthread entry point: unpack the threadzone_t argument and invoke the
   per-zone worker function. */
void *thread_zone(void *threadzone){
  np_t * np = ((threadzone_t *) threadzone)->np;
  gl_t * gl = ((threadzone_t *) threadzone)->gl;
  zone_t zone = ((threadzone_t *) threadzone)->zone;
  ((threadzone_t *) threadzone)->funct(np,gl,zone);
  return(NULL);
}

/* Launch `funct` over `zone` in a new thread when ZONETHREADS is enabled,
   otherwise run it synchronously in the caller's thread. */
void create_thread_zone(np_t *np, gl_t * gl, zone_t zone,
                        void (*funct)(np_t *, gl_t *, zone_t zone),
                        pthread_t *pthread, threadzone_t *threadzone){
  threadzone->np=np;
  threadzone->gl=gl;
  threadzone->zone=zone;
  threadzone->funct=funct;
#ifdef ZONETHREADS
  if (pthread_create(pthread, NULL, thread_zone, threadzone))
    fatal_error("Cannot create thread.");
#else
  (*thread_zone)(threadzone);
#endif
}

/* Join the zone threads created above; a no-op without ZONETHREADS, and also
   when COUNTFLAG is set (counting pass — no threads were actually launched). */
void join_all_threads_zone(long numthread, pthread_t *pthread, bool COUNTFLAG){
#ifdef ZONETHREADS
  long thread;
  void *retval;
  if (!COUNTFLAG) {
    for (thread=0; thread<numthread; thread++){
      if (pthread_join(pthread[thread],&retval))
        fatal_error("Cannot join thread %ld.",thread);
    }
  }
#endif
}

/* 1D-segment worker: add dUstar to U along the segment [ls,le] in direction
   theta, locking each node, and accumulate the effective iteration count. */
static void update_U_from_dUstar_1(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    thread_lock_node_set(np,l,THREADTYPE_ZONE);
    add_dUstar_to_U(np,l,gl,np[l].wk->dUstar);
    thread_lock_node_unset(np,l,THREADTYPE_ZONE);
    /* - if not using SMALLTHREADS, only need to lock for the loop threads, since gl
         is local for the zone thread
       - if using SMALLTHREADS, then need to lock for both the loop and zone threads
       For now, lock for both the loop and zone threads */
    thread_lock_global_set(gl,THREADTYPE_ALL);
    gl->effiter_U+=1.0/(double)(gl->nn);
    thread_lock_global_unset(gl,THREADTYPE_ALL);
  }
}

/* Sweep update_U_from_dUstar_1 over all inner nodes of `zone` using the
   1D-segment machinery. */
static void update_U_from_dUstar(np_t *np, gl_t *gl, zone_t zone){
  sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_1,SWEEPTYPE_I,TYPELEVEL_FLUID_WORK,
                         &is_node_inner,SEGMENTWORK_HEAVY,GRIDLEVEL_ONE);
}

/* Choose a thread count <= maxzonethread that maximizes the size of the last
   (partial) batch of zones, to balance work across threads. */
long _numthread_optimized(long numzone){
  long l,cnt,lmax,numthread;
  numthread=numzone;
  if (numzone>maxzonethread) {
    lmax=0;
    for (cnt=1; cnt<=maxzonethread; cnt++){
      l=mod(numzone,cnt);
      if (l==0) l=cnt;
      if (l>lmax) {
        numthread=cnt;
        lmax=l;
      }
    }
  }
  return(numthread);
}

#ifdef DISTMPI
/* Exchange the conserved variables U of the overlap regions between every pair
   of MPI ranks whose domains intersect, using buffered sends, then rebuild the
   primitive variables of the received (resumed) nodes. */
void exchange_U(np_t *np, gl_t *gl){
  int bl,rankrecv,numproc,ranksend,thisrank,pack_size_Ulocal,pack_size_cnt;
  long i,j,k,flux,iterator,cnt,prevcnt,total;
  long primcnt=0;                 /* number of received nodes needing find_prim_fluid */
  long bufsize=0;                 /* total MPI_Bsend buffer size in bytes */
  long *recvcnt,*sendcnt,*processcnt;
  long *primnodenums=NULL;
  long *processnodenums;          /* local node indices, grouped by sending rank */
  double *buf,*bufptr;
  zone_t zonesend,zonerecv,zone;
  flux_t *recvUlocal;
  flux_t *sendUlocal=NULL;        /* packed U data, grouped by destination rank */
  MPI_Status MPI_Status1;
MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  recvcnt=(long *)malloc(numproc*sizeof(long));
  sendcnt=(long *)malloc(numproc*sizeof(long));
  processcnt=(long *)malloc(numproc*sizeof(long));
  processnodenums=(long *)malloc(numproc*sizeof(long));
  for (i=0; i<numproc; i++){
    sendcnt[i]=0;
    processcnt[i]=0;
  }
  /* First pass: for every overlapping (sender, receiver) pair involving this
     rank, gather the values to send (sendUlocal) and the local node indices
     that will receive data (processnodenums); remember resumed nodes so their
     primitive variables can be refreshed after the exchange. */
  for (ranksend=0; ranksend<numproc; ranksend++){
    zonesend=_domain_from_rank(ranksend,gl);
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){
        zonerecv=_domain_lim_from_rank(rankrecv,gl);
        if (is_zone_intersecting_zone(zonesend,zonerecv)){
          zone=_zone_intersection(zonesend,zonerecv);
          for_ijk(zone,is,js,ks,ie,je,ke){
            if (ranksend==thisrank) {
              for (total=0,iterator=0; iterator<numproc; iterator++) total+=sendcnt[iterator];
              for (prevcnt=0,iterator=0; iterator<=rankrecv; iterator++) prevcnt+=sendcnt[iterator];
              sendUlocal=(flux_t *)realloc(sendUlocal,(total+1)*sizeof(flux_t));
              /* NOTE(review): this forward copy would smear sendUlocal[prevcnt]
                 over the shifted range if prevcnt<total; in practice rankrecv
                 increases monotonically so prevcnt==total here and the loop
                 body never runs — confirm before relying on another order */
              for (iterator=prevcnt+1;iterator<total+1;iterator++){
                for (flux=0; flux<nf; flux++) *(*(sendUlocal + iterator) + flux)=*(*(sendUlocal + iterator-1) + flux);
              }
              for (flux=0; flux<nf; flux++) *(*(sendUlocal + prevcnt) + flux)=np[_ai(gl,i,j,k)].bs->U[flux];
              sendcnt[rankrecv]++;
            }
            if (rankrecv==thisrank){
              for (prevcnt=0,iterator=0; iterator<=ranksend; iterator++) prevcnt+=processcnt[iterator];
              processnodenums=(long *)realloc(processnodenums,(prevcnt+1)*sizeof(long));
              processnodenums[prevcnt]=_ai(gl,i,j,k);
              processcnt[ranksend]++;
              if (is_node_resumed(np[_ai(gl,i,j,k)])){
                primnodenums=(long *)realloc(primnodenums,(primcnt+1)*sizeof(long));
                primnodenums[primcnt]=_ai(gl,i,j,k);
                primcnt++;
              }
            }
          }
        }
      }
    }
  }
  /* Second pass: size and attach the bsend buffer, send counts + data to
     every other rank, then receive and scatter the incoming values. */
  if(numproc != 1){
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if (thisrank!=rankrecv){
        MPI_Pack_size(nf*sendcnt[rankrecv],MPI_DOUBLE,MPI_COMM_WORLD,&pack_size_Ulocal);
        MPI_Pack_size(1,MPI_LONG,MPI_COMM_WORLD,&pack_size_cnt);
        bufsize+=(2*MPI_BSEND_OVERHEAD)+pack_size_Ulocal+pack_size_cnt;
      }
    }
    buf=(double *)malloc(bufsize);
    MPI_Buffer_attach(buf, bufsize);
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if (thisrank!=rankrecv){
        for (prevcnt=0,iterator=0; iterator<rankrecv; iterator++) prevcnt+=sendcnt[iterator];
        /* tag 1 carries the entry count, tag 0 the flux data */
        MPI_Bsend(&sendcnt[rankrecv],1,MPI_LONG,rankrecv,1,MPI_COMM_WORLD);
        MPI_Bsend(&sendUlocal[prevcnt],nf*sendcnt[rankrecv],MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD);
      }
    }
    free(sendUlocal);
    for (ranksend=0; ranksend<numproc; ranksend++){
      if (thisrank!=ranksend){
        MPI_Recv(&recvcnt[ranksend],1,MPI_LONG,ranksend,1,MPI_COMM_WORLD,&MPI_Status1);
        recvUlocal=(flux_t *)malloc(recvcnt[ranksend]*sizeof(flux_t));
        MPI_Recv(recvUlocal,recvcnt[ranksend]*nf,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1);
        for (cnt=0; cnt<recvcnt[ranksend]; cnt++){
          for (prevcnt=0,iterator=0; iterator<ranksend; iterator++) prevcnt+=processcnt[iterator];
          for (flux=0; flux<nf; flux++) np[processnodenums[prevcnt+cnt]].bs->U[flux]=*(*(recvUlocal + cnt) + flux);
        }
        free(recvUlocal);
      }
    }
    /* refresh primitive variables on the resumed nodes that received data */
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
    for (cnt=0; cnt<primcnt; cnt++) find_prim_fluid(np,primnodenums[cnt],gl);
    MPI_Buffer_detach(&bufptr,&bl);
    free(buf);
  }
  free(processnodenums);
  free(primnodenums);
  free(processcnt);
  free(recvcnt);
  free(sendcnt);
  MPI_Barrier(MPI_COMM_WORLD);
}


/* Node-by-node variant of exchange_U using plain MPI_Send/MPI_Recv. */
void exchange_U_old(np_t *np, gl_t *gl){
  //same as above but without the MPI_Buffer
  int rankrecv,numproc,ranksend,thisrank;
  long i,j,k,flux;
  long cnt = 0;
  long *nodenums = NULL;
  zone_t zonesend,zonerecv,zone;
  flux_t Ulocal;
  MPI_Status MPI_Status1;
  MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  for (ranksend=0; ranksend<numproc; ranksend++){
    zonesend=_domain_from_rank(ranksend,gl);
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){
        zonerecv=_domain_lim_from_rank(rankrecv,gl);
        if (is_zone_intersecting_zone(zonesend,zonerecv)){
          zone=_zone_intersection(zonesend,zonerecv);
          for_ijk(zone,is,js,ks,ie,je,ke){
if (ranksend==thisrank) {
              for (flux=0; flux<nf; flux++) Ulocal[flux]=np[_ai(gl,i,j,k)].bs->U[flux];
              MPI_Send(Ulocal,nf,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD);
            }
            if (rankrecv==thisrank) {
              MPI_Recv(Ulocal,nf,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1);
              for (flux=0; flux<nf; flux++) np[_ai(gl,i,j,k)].bs->U[flux]=Ulocal[flux];
              /* remember resumed nodes: their primitive variables must be
                 refreshed once all values are in */
              if (is_node_resumed(np[_ai(gl,i,j,k)])){
                nodenums=(long *)realloc(nodenums,(cnt+1)*sizeof(long));
                nodenums[cnt]=_ai(gl,i,j,k);
                cnt++;
              }
            }
          }
        }
      }
    }
  }
#ifdef OPENMPTHREADS
#pragma omp parallel for private(i) schedule(dynamic)
#endif
  for (i=0; i<cnt; i++) find_prim_fluid(np,nodenums[i],gl);
  free(nodenums);
  MPI_Barrier(MPI_COMM_WORLD);
}
#endif


/* Advance U over all time-step zones of multizone: find_dU runs over the ts
   zones in batches of zone threads, then the stored dUstar increments are
   applied serially. */
void update_U_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){
  long cnt,numzonethread,cntthread;
  pthread_t *pthread;
  threadzone_t *threadzone;
  /* Find dUstar for inner nodes*/
  numzonethread=_numthread_optimized(multizone.numzones_ts);
  pthread=(pthread_t *)malloc(numzonethread*sizeof(pthread_t));
  threadzone=(threadzone_t *)malloc(numzonethread*sizeof(threadzone_t));
  cntthread=0;
  for (cnt=0; cnt<multizone.numzones_ts; cnt++) {
    create_thread_zone(np, gl, multizone.ts[cnt], &find_dU, &(pthread[cntthread]), &(threadzone[cntthread]));
    cntthread++;
    if (cntthread==numzonethread) {
      join_all_threads_zone(cntthread, pthread, FALSE);
      cntthread=0;
    }
  }
  if (cntthread>0) join_all_threads_zone(cntthread, pthread, FALSE);
  for (cnt=0; cnt<multizone.numzones_ts; cnt++) update_U_from_dUstar(np, gl, multizone.ts[cnt]);
  free(pthread);
  free(threadzone);
}


/* Update the boundary nodes of every bdry zone of multizone. */
void update_bdry_nodes_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){
  long cnt;
  for (cnt=0; cnt<multizone.numzones_bdry; cnt++) update_bdry_nodes(np, gl, multizone.bdry[cnt]);
}


/* Evaluate the residual over all res zones of multizone, batched over zone
   threads like update_U_with_multizone. */
void find_residual_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){
  long cnt,numzonethread,cntthread;
  pthread_t *pthread;
  threadzone_t *threadzone;
  numzonethread=_numthread_optimized(multizone.numzones_res);
  pthread=(pthread_t *)malloc(numzonethread*sizeof(pthread_t));
  threadzone=(threadzone_t *)malloc(numzonethread*sizeof(threadzone_t));
  cntthread=0;
  for (cnt=0; cnt<multizone.numzones_res; cnt++) {
    create_thread_zone(np, gl, multizone.res[cnt], &find_residual, &(pthread[cntthread]), &(threadzone[cntthread]));
    cntthread++;
    if (cntthread==numzonethread) {
      join_all_threads_zone(cntthread, pthread, FALSE);
      cntthread=0;
    }
  }
  if (cntthread>0) {
    join_all_threads_zone(cntthread, pthread, FALSE);
  }
  free(pthread);
  free(threadzone);
}


/* One multizone solution step: update U, refresh boundary nodes, then
   re-evaluate the residual. */
void solve_multizone(np_t *np, gl_t *gl, multizone_t multizone){
  update_U_with_multizone(np,gl,multizone);
  update_bdry_nodes_with_multizone(np,gl,multizone);
  find_residual_with_multizone(np,gl,multizone);
}


/* Release the zone arrays owned by multizone. */
void free_multizone(multizone_t *multizone){
  free(multizone->res);
  free(multizone->bdry);
  free(multizone->ts);
}


/* Recompute the residual and ximax over zone (fluid and, when enabled, EM
   field); under DISTMPI the global maximum and the indices of the node that
   attains it are reduced/broadcast across all ranks. */
void check_residual(np_t *np, gl_t *gl, zone_t zone){
  resume_nodes_in_zone(np, gl, zone);
#ifdef EMFIELD
  update_prim_emfield_mem_in_zone(np, gl, zone);
#endif
  find_residual(np, gl, zone);
  find_ximax(np,gl,zone,IJK_UPDATE_YES);
#ifdef EMFIELD
  find_residual_emfield(np,gl,zone);
  find_ximax_emfield(np, gl, zone);
#endif
#ifdef DISTMPI
  int rank,proc;
  struct {
    double ximax;
    int rank;
  } ximaxrank,ximaxrank_max;
#ifdef EMFIELD
  struct {
    double ximax;
    int rank;
  } ximaxrank_emfield,ximaxrank_max_emfield;
#endif
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &proc);
  ximaxrank.ximax=gl->ximax;
  ximaxrank.rank=rank;
  /* MPI_MAXLOC also yields the rank holding the maximum, which then
     broadcasts the winning node indices */
  MPI_Allreduce(&ximaxrank, &ximaxrank_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);
  gl->ximax=ximaxrank_max.ximax;
  MPI_Bcast(&(gl->i_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
  ximaxrank_emfield.ximax=gl->ximax_emfield;
  ximaxrank_emfield.rank=rank;
  MPI_Allreduce(&ximaxrank_emfield, &ximaxrank_max_emfield, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);
  gl->ximax_emfield=ximaxrank_max_emfield.ximax;
  MPI_Bcast(&(gl->i_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#ifdef _2DL
MPI_Bcast(&(gl->j_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
  MPI_Bcast(&(gl->j_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#endif //_2DL
#ifdef _3DL
  MPI_Bcast(&(gl->k_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
  MPI_Bcast(&(gl->k_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#endif //_3DL
#endif //DISTMPI
}


/* Return the convergence measure xi at a (resumed) node: the largest
   normalized residual component |Res/Omega/Uref| over all fluxes.  Aborts
   with diagnostics if a component evaluates to NaN. */
double _xi(np_t np, gl_t *gl, flux_t Res){
  long flux;
  double xi,xitmp;
  assert_np(np,is_node_resumed(np));
  xi=0.0;
  for (flux=0; flux<nf; flux++) {
    xitmp=fabs(Res[flux]/_Omega(np,gl)/gl->cycle.fluid.Uref[flux]);
    xi=max(xi,xitmp);
    if (isnan(xitmp)){
      fatal_error("problem computing xitmp in function _xi() in cycle_share.c;\n xitmp=%E\n Res[%ld]=%E\n Omega=%E\n Uref[%ld]=%E\n",xitmp,flux,Res[flux],_Omega(np,gl),flux,gl->cycle.fluid.Uref[flux]);
    }
  }
  return(xi);
}


/* Find Delta_Lambda at node l along dim; with PRECON_LOCALTIMESTEP2 the value
   is additionally maximized over the two inner neighbours along dim. */
static void find_Delta_Lambda_for_dtau_local(np_t *np, gl_t *gl, long l, long dim, flux_t Delta_Lambda){
  long offset,maxoffset,flux,dim2;
  flux_t Delta_Lambda_tmp;
  find_Delta_Lambda_for_dtau(np, gl, l, dim, Delta_Lambda);
  if (gl->PRECONDITIONER==PRECON_LOCALTIMESTEP2){
    maxoffset=1;
    for (dim2=dim; dim2<=dim; dim2++){
      for (offset=1; offset<=maxoffset; offset++){
        if (is_node_inner(np[_al(gl,l,dim2,-offset)],TYPELEVEL_FLUID_WORK)){
          find_Delta_Lambda_for_dtau(np, gl, _al(gl,l,dim2,-offset), dim, Delta_Lambda_tmp);
          for (flux=0; flux<nf; flux++) Delta_Lambda[flux]=max(Delta_Lambda[flux],Delta_Lambda_tmp[flux]);
        }
        if (is_node_inner(np[_al(gl,l,dim2,+offset)],TYPELEVEL_FLUID_WORK)){
          find_Delta_Lambda_for_dtau(np, gl, _al(gl,l,dim2,+offset), dim, Delta_Lambda_tmp);
          for (flux=0; flux<nf; flux++) Delta_Lambda[flux]=max(Delta_Lambda[flux],Delta_Lambda_tmp[flux]);
        }
      }
    }
  }
}


/* Compute the local pseudo-time step dtau per flux at inner node l; blends
   the per-dimension minimum and maximum via exponent sigma1 and scales by the
   CFL number.  With PRECON_CONSTANTTIMESTEP, simply returns gl->dtau. */
void find_dtau(np_t *np, gl_t *gl, long l, flux_t dtau){
  double dtaumin,dtaumax;
  long dim,flux;
  double dtaulocal[nf][nd];
#ifdef UNSTEADY
  sqmat_t LambdaZ;
#endif
  flux_t Delta_Lambda;
  assert_np(np[l],is_node_inner(np[l],TYPELEVEL_FLUID_WORK));
  if (gl->PRECONDITIONER!=PRECON_CONSTANTTIMESTEP){
#ifdef UNSTEADY
    find_LambdaZ(np,gl,l,LambdaZ);
    set_matrix_to_identity(LambdaZ); //turn off effect of LambdaZ -> seems to be detrimental not beneficial
    for (dim=0; dim<nd; dim++){
      find_Delta_Lambda_for_dtau_local(np, gl, l, dim, Delta_Lambda);
      for (flux=0; flux<nf; flux++){
        assert(LambdaZ[flux][flux]>0.0);
        dtaulocal[flux][dim]=gl->dt/LambdaZ[flux][flux]/notzero(Delta_Lambda[flux]*gl->dt/LambdaZ[flux][flux]+1.0,1e-39);
      }
    }
#else
    for (dim=0; dim<nd; dim++){
      find_Delta_Lambda_for_dtau_local(np, gl, l, dim, Delta_Lambda);
      for (flux=0; flux<nf; flux++){
        dtaulocal[flux][dim]=1.0/notzero(Delta_Lambda[flux],1e-39);
      }
    }
#endif
    /* find optimal dtaus for each flux */
    for (flux=0; flux<nf; flux++){
      dtaumin=1.0e99;
      dtaumax=0.0e0;
      for (dim=0; dim<nd; dim++){
        dtaumin=min(dtaulocal[flux][dim],dtaumin);
        dtaumax=max(dtaulocal[flux][dim],dtaumax);
      }
      /* cap the spread between min and max dtau */
      dtaumax=min(dtaumin*MAXRATIO_DTAUMAX_DTAUMIN,dtaumax);
      dtau[flux]=gl->CFL*pow(dtaumin,1.0e0-gl->sigma1)*pow(dtaumax,gl->sigma1);
    }
  } else {
    for (flux=0; flux<nf; flux++){
      dtau[flux]=gl->dtau;
    }
  }
}


/* Collapse the per-flux dtau vector at node l into one scalar pseudo-time
   step: a geometric blend (exponent sigma2) of the min and capped max. */
void find_constant_dtau(np_t *np, gl_t *gl, long l, double *dtau){
  long flux;
  flux_t dtau_vector;
  double dtaumin,dtaumax;
  find_dtau(np,gl,l,dtau_vector);
  /* average min and max dtau */
  dtaumin=1.0e99;
  dtaumax=-1.0e99;
  for (flux=0; flux<nf; flux++) dtaumin=min(dtaumin,dtau_vector[flux]);
  for (flux=0; flux<nf; flux++) dtaumax=max(dtaumax,dtau_vector[flux]);
  dtaumax=min(dtaumin*MAXRATIO_DTAUMAX_DTAUMIN,dtaumax);
  *dtau=pow(dtaumin,1.0-gl->sigma2)*pow(dtaumax,gl->sigma2);
}


#ifdef EMFIELD
#ifdef DISTMPI
/* Exchange the EM-field conserved variables Uemfield between MPI ranks over
   the overlap of the sender's owned domain with the receiver's extended
   domain (node-by-node MPI_Send/MPI_Recv). */
void exchange_U_emfield(np_t *np, gl_t *gl){
  int rankrecv,numproc,ranksend,thisrank;
  long i,j,k,flux;
  zone_t zonesend,zonerecv,zone;
  fluxemfield_t Ulocal;
  MPI_Status MPI_Status1;
  MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  for (ranksend=0; ranksend<numproc; ranksend++){
    zonesend=_domain_from_rank(ranksend,gl);
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if
(rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){
        zonerecv=_domain_lim_from_rank(rankrecv,gl);
        if (is_zone_intersecting_zone(zonesend,zonerecv)){
          zone=_zone_intersection(zonesend,zonerecv);
          for_ijk(zone,is,js,ks,ie,je,ke){
            if (ranksend==thisrank) {
              for (flux=0; flux<nfe; flux++) Ulocal[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
              MPI_Send(Ulocal,nfe,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD);
            }
            if (rankrecv==thisrank) {
              MPI_Recv(Ulocal,nfe,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1);
              for (flux=0; flux<nfe; flux++) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Ulocal[flux];
            }
          }
        }
      }
    }
  }
  MPI_Barrier(MPI_COMM_WORLD);
}


/* Broadcast-based variant: every valid node's Uemfield is broadcast from its
   owning rank to all ranks that hold that node in their extended domain. */
void exchange_U_emfield_old(np_t *np, gl_t *gl){
  int rank;
  long i,j,k,flux;
  fluxemfield_t Ulocal;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  for_ijk (gl->domain_all,is,js,ks,ie,je,ke){
    if (rank==_node_rank(gl, i, j, k) && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
      for (flux=0; flux<nfe; flux++) {
        Ulocal[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
      }
    }
    MPI_Bcast_Node(Ulocal,nfe,MPI_DOUBLE,_node_rank(gl,i,j,k),MPI_COMM_WORLD,i,j,k,gl);
    if (is_node_in_zone(i,j,k,gl->domain_lim) && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
      for (flux=0; flux<nfe; flux++) {
        np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Ulocal[flux];
      }
    }
  }
  MPI_Barrier(MPI_COMM_WORLD);
}
#endif


/* 1D-segment kernel: stage 1 of the EM-field primitive-memory update on every
   valid EM-field node of the segment. */
void update_prim_emfield_mem_in_zone_1(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l;
  //printf("(%ld,%ld) to (%ld,%ld)\n",_i(ls,gl,0),_i(ls,gl,1),_i(le,gl,0),_i(le,gl,1));
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){
      find_prim_emfield_mem_1(np, gl, l);
    }
  }
}


/* 1D-segment kernel: stage 2 of the EM-field primitive-memory update. */
void update_prim_emfield_mem_in_zone_2(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){
      find_prim_emfield_mem_2(np, gl, l);
    }
  }
}


/* 1D-segment kernel: stage 3 of the EM-field primitive-memory update. */
void update_prim_emfield_mem_in_zone_3(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){
      find_prim_emfield_mem_3(np, gl, l);
    }
  }
}


#ifdef _TSEMF_STORE_COEFFICIENTS
/* 1D-segment kernel: precompute and cache the pseudo-time steps and the
   per-dimension linearization coefficients (and their sum) for every inner
   EM-field node. */
void update_prim_emfield_mem_in_zone_4(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l,dim,flux;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    if (is_node_inner(np[l],TYPELEVEL_EMFIELD)){
      for (flux=0; flux<nfe; flux++){
        find_dtau_emfield(np,gl,l,flux,&(np[l].bs->dtauemfield[flux]));
        np[l].bs->coeffp0sum[flux]=0.0;
        for (dim=0; dim<nd; dim++){
          find_linearization_coefficients_inner_node_emfield(np, gl, l, dim, flux,
                       &(np[l].bs->coeffm1[dim][flux]),
                       &(np[l].bs->coeffp0[dim][flux]),
                       &(np[l].bs->coeffp1[dim][flux]));
          np[l].bs->coeffp0sum[flux]+=np[l].bs->coeffp0[dim][flux];
        }
      }
    }
  }
}
#endif


/* Run the EM-field primitive-memory update stages (1..3, plus 4 when
   coefficients are cached) over zone. */
void update_prim_emfield_mem_in_zone(np_t *np, gl_t *gl, zone_t zone){
  sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_1,SWEEPTYPE_I,
                  TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_2,SWEEPTYPE_I,
                  TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_3,SWEEPTYPE_I,
                  TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
#ifdef _TSEMF_STORE_COEFFICIENTS
  sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_4,SWEEPTYPE_I,
                  TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
#endif
}


/* Accumulate the convection flux differences into the EM-field residual along
   direction theta: each interface flux Fm1h is subtracted from the right node
   and added to the left node (skipping the segment end points accordingly). */
void add_convection_residual_emfield(long theta, long ls, long le, np_t *np, gl_t *gl){
  long l,flux;
  fluxemfield_t Fm1h;
  for (l=ls; l!=_l_plus_one(_l_plus_one(le,gl,theta),gl,theta); l=_l_plus_one(l,gl,theta)){
    find_Fstar_interface_emfield(np,gl,_al(gl,l,theta,-1),_al(gl,l,theta,+0),theta,Fm1h);
    for (flux=0; flux<nfe; flux++){
      if (l!=_l_plus_one(le,gl,theta)) np[l].bs->Resemfield[flux]-=Fm1h[flux];
      if (l!=ls) np[_al(gl,l,theta,-1)].bs->Resemfield[flux]+=Fm1h[flux];
    }
  }
}


/* Subtract the EM-field source terms from the residual (applied only on the
   theta==0 sweep so sources are counted once). */
void
add_source_residual_emfield(long theta, long ls, long le, np_t *np, gl_t *gl){
  long l;
  long flux;
  fluxemfield_t S;
  if (theta==0) {
    for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
      find_Sstar_emfield(np,gl,l,S);
      for (flux=0; flux<nfe; flux++) np[l].bs->Resemfield[flux]-=S[flux];
    }
  }
}


/* 1D-segment kernel: add convection and source contributions to the EM-field
   residual over [ls,le] along theta. */
void update_residual_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){
  add_convection_residual_emfield(theta,ls,le,np,gl);
  add_source_residual_emfield(theta,ls,le,np,gl);
}


/* 1D-segment kernel: zero the EM-field residual on the segment and account
   for the work in gl->effiter_R_emfield. */
void initialize_residual_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l,flux;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    for (flux=0; flux<nfe; flux++) np[l].bs->Resemfield[flux]=0.0e0;
    gl->effiter_R_emfield+=1.0e0/(double)gl->nn;
  }
}


/* Update one EM-field boundary node: try the face-normal boundary direction
   first; if none applies (and the node is not a link), fall back to scanning
   the diagonal corner neighbours for a pair of inner nodes to extrapolate
   from. */
void update_bdry_node_emfield(np_t *np, gl_t *gl, long l){
  long dim,l_C,l_B,l_A;
  long dimsgn;
  bool BDRYDIRECFOUND;
#ifdef _2DL
  long dim1;
  long dim2;
#endif
#ifdef _3D
  long dim3;
#endif
  bool UPDATED;
  assert(is_node_bdry(np[l],TYPELEVEL_EMFIELD));
  UPDATED=FALSE;
  BDRYDIRECFOUND=find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &dim, &dimsgn);
  if (is_node_link(np[l],TYPELEVEL_EMFIELD)) {
    // in case the boundary node is a link, Uemf has already been updated
    UPDATED=TRUE;
  }
  if (!UPDATED && BDRYDIRECFOUND){
    l_A=l;
    l_B=_al(gl,l,dim,dimsgn);
    l_C=_al(gl,l,dim,dimsgn*2);
    assert(is_node_inner(np[l_C],TYPELEVEL_EMFIELD));
    assert(is_node_inner(np[l_B],TYPELEVEL_EMFIELD));
    update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD);
    UPDATED=TRUE;
  }
  /* now, do the corners */
  if (!UPDATED) {
#ifdef _2D
    for (dim1=-1; dim1<=1; dim1++){
      for (dim2=-1; dim2<=1; dim2++){
        l_C=_all(gl,l,0,dim1*2,1,dim2*2);
        l_B=_all(gl,l,0,dim1,1,dim2);
        l_A=l;
        if ( is_node_inner(np[l_B],TYPELEVEL_EMFIELD) && is_node_inner(np[l_C],TYPELEVEL_EMFIELD) && !UPDATED){
          update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD);
          UPDATED=TRUE;
        }
      }
    }
#endif
#ifdef _3D
    for (dim1=-1; dim1<=1; dim1++){
      for (dim2=-1; dim2<=1; dim2++){
        for (dim3=-1; dim3<=1; dim3++){
          l_C=_al(gl, _al(gl, _al(gl,l,0,dim1*2), 1,dim2*2), 2,dim3*2);
          l_B=_al(gl, _al(gl, _al(gl,l,0,dim1), 1,dim2), 2,dim3);
          l_A=l;
          if ( is_node_inner(np[l_B],TYPELEVEL_EMFIELD) && is_node_inner(np[l_C],TYPELEVEL_EMFIELD) && !UPDATED){
            update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD);
            UPDATED=TRUE;
          }
        }
      }
    }
#endif
  }
}


/* 1D-segment kernel: update every EM-field boundary node of the segment under
   a per-node zone lock. */
void update_bdry_nodes_on_segment_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    if (is_node_bdry(np[l],TYPELEVEL_EMFIELD)){
      thread_lock_node_set(np,l,THREADTYPE_ZONE);
      update_bdry_node_emfield(np, gl, l);
      thread_lock_node_unset(np,l,THREADTYPE_ZONE);
    }
  }
}


/* Sweep the EM-field boundary-node update over zone. */
void update_bdry_nodes_emfield(np_t *np, gl_t *gl, zone_t zone){
  sweep_with_1D_segments(np, gl, zone, &update_bdry_nodes_on_segment_emfield, SWEEPTYPE_I,
                       TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
}


/* Evaluate the EM-field residual over zone and store each inner node's
   convergence measure in bs->_xi_emfield. */
void find_residual_emfield(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  /* now, let's find the residual and store it in bs->dUstaremfield*/
  sweep_with_1D_segments(np,gl,zone,&initialize_residual_emfield, SWEEPTYPE_I,
                   TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_residual_emfield, SWEEPTYPE_IJK,
                   TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_HEAVY,GRIDLEVEL_ONE);
  /* let's find max residual, and put it in gl*/
  for_ijk(zone,is,js,ks,ie,je,ke){
    if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
      np[_ai(gl,i,j,k)].bs->_xi_emfield=_xi_emfield(np[_ai(gl,i,j,k)],gl,np[_ai(gl,i,j,k)].bs->Resemfield);
    }
  }
}


/* Scan zone for the largest cached _xi_emfield and record both the value and
   the indices of the node attaining it in gl. */
void find_ximax_emfield(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  gl->ximax_emfield=0.0e0;
  for_ijk(zone,is,js,ks,ie,je,ke){
    if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD) && np[_ai(gl,i,j,k)].bs->_xi_emfield>=gl->ximax_emfield) {
      gl->ximax_emfield=np[_ai(gl,i,j,k)].bs->_xi_emfield;
      gl->i_ximax_emfield=i;
      gl->j_ximax_emfield=j;
      gl->k_ximax_emfield=k;
    }
  }
}
/* Parse the arguments of the UpdateEMField() control-file call into gl:
   Lc, relaxEMF, optionally dt (UNSTEADY), the time-stepping method, and for
   the iterative methods an explicit subiteration count.  Validates ranges and
   aborts via fatal_error on bad input. */
void read_UpdateEMField_arguments(char **argum, SOAP_codex_t *codex, gl_t *gl){
  SOAP_substitute_all_argums(argum, codex);
  gl->Lc=SOAP_get_argum_double(codex,*argum,0);
  gl->relaxEMF=SOAP_get_argum_double(codex,*argum,1);
  gl->numsubiter_tsemf=4; /* make the default number of subiterations equal to 4 */
  gl->tsemfmethod=TSEMF_DEFAULT;
  if (gl->Lc<=0.0) fatal_error("The length scale Lc must be positive when calling UpdateEMField().");
  if (gl->relaxEMF<=0.0) fatal_error("The relaxation factor relaxEMF must be positive when calling UpdateEMField().");
  if (gl->relaxEMF>2.0) fatal_error("The relaxation factor relaxEMF must be less than 2 when calling UpdateEMField().");
  if (gl->numsubiter_tsemf<=0.0) fatal_error("The number of subiterations subiter_tsemf must be positive when calling UpdateEMField().");
#ifdef UNSTEADY
  gl->dt=SOAP_get_argum_double(codex,*argum,2);
  if (gl->dt<=0.0) fatal_error("The time step dt must be positive when calling UpdateEMField().");
  if (SOAP_number_argums(*argum)>3) gl->tsemfmethod=SOAP_get_argum_long(codex,*argum,3);
  if (SOAP_number_argums(*argum)>4){
    if (gl->tsemfmethod==TSEMF_SOR || gl->tsemfmethod==TSEMF_SOR2 || gl->tsemfmethod==TSEMF_ADIIMAF || gl->tsemfmethod==TSEMF_IMAF || gl->tsemfmethod==TSEMF_IMAFk || gl->tsemfmethod==TSEMF_IMAFi)
      gl->numsubiter_tsemf=SOAP_get_argum_long(codex,*argum,4);
    else fatal_error("UpdateEMField accepts the number of subiterations as a 5th argument only if TSEMF_SOR, TSEMF_SOR2, TSEMF_ADIIMAF, TSEMF_IMAF, TSMEF_IMAFk, TSMEF_IMAFi is specified.");
  }
#else
  if (SOAP_number_argums(*argum)>2) gl->tsemfmethod=SOAP_get_argum_long(codex,*argum,2);
  if (SOAP_number_argums(*argum)>3) {
    if (gl->tsemfmethod==TSEMF_SOR || gl->tsemfmethod==TSEMF_SOR2 || gl->tsemfmethod==TSEMF_ADIIMAF || gl->tsemfmethod==TSEMF_IMAF || gl->tsemfmethod==TSEMF_IMAFk || gl->tsemfmethod==TSEMF_IMAFi)
      gl->numsubiter_tsemf=SOAP_get_argum_long(codex,*argum,3);
    else fatal_error("UpdateEMField accepts the number of subiterations as a 4th argument only if TSEMF_SOR, TSEMF_SOR2, TSEMF_ADIIMAF, TSEMF_IMAF, TSEMF_IMAFk, TSMEF_IMAFi is specified.");
  }
#endif
}


/* Solve the tridiagonal system stored in tdma (val[0..2] = sub/diag/super
   coefficients, val[3] = RHS) over numlines lines.  Without DISTMPI, or with
   explicit MPI boundaries, this is a plain Thomas solve; otherwise the
   forward-elimination / back-substitution sweeps are chained across ranks by
   passing the edge lines to/from the neighbouring processes that own the
   segment end nodes ls and le. */
void solve_TDMA_emfield(np_t *np, gl_t *gl, long theta, long ls, long le, int TYPELEVEL, EXM_tdmaline_t *tdma, long numlines){
#ifdef DISTMPI
  long line,cnt,i,j,k,i_s,j_s,k_s;
  double tmp;
  MPI_Status MPI_Status1;
  if (gl->EM_MPIBDRY_EXPLICIT){
    EXM_solve_TDMA(tdma, numlines);
  } else {
    /* if ls node is inner node, need to obtain the tdma[0] from another process that owns ls */
    if (is_node_inner(np[ls],TYPELEVEL)){
      find_ijk_from_l(gl, ls, &i, &j, &k);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Recv(tdma[0].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i,j,k),MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in solve_TDMA_emfield");
      assert(tdma[0].val[0]==0.0);
    }
    /* forward elimination */
    for (line=0; line<numlines-1; line++){
      assert(tdma[line].val[1]!=0.0);
      tmp = -(tdma[line+1].val[0] / tdma[line].val[1]);
      for (cnt = 1; cnt <= 2; cnt++)
        tdma[line+1].val[cnt - 1] += tdma[line].val[cnt] * tmp;
      tdma[line+1].val[3] += tdma[line].val[3] * tmp;
      tdma[line+1].val[0] = 0.0;
    }
    /* if le node is inner node, need to send the tdma[numlines-2] to another process that owns le */
    if (is_node_inner(np[le],TYPELEVEL)){
      find_ijk_from_l(gl, le, &i, &j, &k);
      find_ijk_from_l(gl, _l_minus_one(le,gl,theta), &i_s, &j_s, &k_s);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Send(tdma[numlines-2].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i_s,j_s,k_s),MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in solve_TDMA_emfield");
    }
    /* if le node is inner node, need to obtain the tdma[numlines-1] from another process that owns le */
    if (is_node_inner(np[le],TYPELEVEL)){
      find_ijk_from_l(gl, le, &i, &j, &k);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Recv(tdma[numlines-1].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i,j,k),MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in solve_TDMA_emfield");
      assert(tdma[numlines-1].val[2]==0.0);
    }
    /* back substitution */
    for (line=numlines-1; line>0; line--){
      assert(tdma[line].val[1]!=0.0);
      tdma[line].val[3] /= tdma[line].val[1];
      tdma[line].val[1] = 1.0;
      tdma[line-1].val[3] -= tdma[line].val[3] * tdma[line-1].val[2];
      tdma[line-1].val[2] = 0.0;
    }
    assert(tdma[0].val[1]!=0.0);
    tdma[0].val[3] /= tdma[0].val[1];
    tdma[0].val[1] = 1.0;
    /* if ls node is inner node, need to send the tdma[1] to another process that owns ls */
    if (is_node_inner(np[ls],TYPELEVEL)){
      find_ijk_from_l(gl, ls, &i, &j, &k);
      find_ijk_from_l(gl, _l_plus_one(ls,gl,theta), &i_s, &j_s, &k_s);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Send(tdma[1].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i_s,j_s,k_s),MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in solve_TDMA_emfield");
    }
  }
#else
  EXM_solve_TDMA(tdma, numlines);
#endif
}
#endif//EMFIELD
/* ==== globals.h ==== */
#include <iostream> using std::cout; using std::endl; #ifndef _flops_globals_H #define _flops_globals_H //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// #include <stdint.h> #include <string.h> #include <omp.h> #include <memory> #include "tools.h" namespace Flops{ //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// typedef uint64_t largeint_t; const double TEST_ADD_ADD = 1.4142135623730950488; const double TEST_ADD_SUB = 1.414213562373095; const double TEST_MUL_MUL = 1.4142135623730950488; const double TEST_MUL_DIV = 0.70710678118654752440; const double TEST_FMA_LINEAR_MUL0 = 1.4142135623730950488; const double TEST_FMA_LINEAR_MUL1 = 1.7320508075688772935; const double TEST_FMA_HORNER_MUL0 = 1.4142135623730950488; const double TEST_FMA_HORNER_ADD0 = 1.7320508075688772935; const double TEST_FMA_HORNER_MUL1 = 0.70710678118654752440; const double TEST_FMA_HORNER_ADD1 = -1.2247448713915890491; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// class benchmark{ virtual void print_meta() const = 0; virtual largeint_t run_loop(largeint_t iterations, double &result) const = 0; public: void run(largeint_t iterations) const{ print_meta(); double result; wclk start = 
wclk_now(); iterations = run_loop(iterations, result); double seconds = wclk_secs_since(start); cout << " Result = " << result << endl; cout << " FP Ops = " << iterations << endl; cout << " seconds = " << seconds << endl; cout << " GFlops = " << iterations / seconds / 1.e9 << endl; cout << endl; } void run(largeint_t iterations, size_t threads) const{ print_meta(); auto thread_result = std::unique_ptr<double[]>(new double[threads]); auto thread_iterations = std::unique_ptr<largeint_t[]>(new largeint_t[threads]); memset(thread_result.get() , 0, threads * sizeof(double)); memset(thread_iterations.get(), 0, threads * sizeof(largeint_t)); wclk start = wclk_now(); #pragma omp parallel num_threads((int)threads) { size_t thread_id = omp_get_thread_num(); double result; thread_iterations[thread_id] = run_loop(iterations, result); thread_result[thread_id] = result; } double seconds = wclk_secs_since(start); double result = 0; iterations = 0; for (size_t i = 0; i < threads; i++){ result += thread_result[i]; iterations += thread_iterations[i]; } cout << " Result = " << result << endl; cout << " FP Ops = " << iterations << endl; cout << " seconds = " << seconds << endl; cout << " GFlops = " << iterations / seconds / 1.e9 << endl; cout << endl; } }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// } #endif
/* ==== test-mempool.c ==== */
/*
 * CUnit test suite for the xztl memory-pool module.
 * Tests are registered in main() and run in order: the media/mempool modules
 * are initialized first and torn down by the final test, so individual tests
 * are NOT independent of each other.
 */
#include <pthread.h>
#include <omp.h>
#include <xztl.h>
#include <xztl-mempool.h>
#include <ztl-media.h>
#include "CUnit/Basic.h"

/* Device string supplied on the command line (e.g. "liou:/dev/nvme0n2"). */
static const char **devname;

/* Assert a pointer result is non-NULL; on failure log which call produced it. */
static void cunit_mempool_assert_ptr (char *fn, void *ptr)
{
    CU_ASSERT ((uint64_t) ptr != 0);
    if (!ptr)
        printf ("\n %s: ptr %p\n", fn, ptr);
}

/* Assert an integer status is zero; on failure log the call name and status. */
static void cunit_mempool_assert_int (char *fn, int status)
{
    CU_ASSERT (status == 0);
    if (status)
        printf (" %s: %x\n", fn, status);
}

/* Suite setup/teardown hooks required by CU_add_suite; intentionally no-ops. */
static int cunit_mempool_init (void)
{
    return 0;
}

static int cunit_mempool_exit (void)
{
    return 0;
}

/* Register the media backend and initialize media + mempool modules.
 * Bails out early on the first failure since later steps depend on it. */
static void test_mempool_init (void)
{
    int ret;

    ret = znd_media_register (*devname);
    cunit_mempool_assert_int ("znd_media_register", ret);
    if (ret)
        return;

    ret = xztl_media_init ();
    cunit_mempool_assert_int ("xztl_media_init", ret);
    if (ret)
        return;

    cunit_mempool_assert_int ("xztl_mempool_init", xztl_mempool_init ());
}

/* Create a single pool (type MCMD, thread 0, 32 entries of 1024 bytes). */
static void test_mempool_create (void)
{
    uint16_t type, tid, ents;
    uint32_t ent_sz;

    type   = XZTL_MEMPOOL_MCMD;
    tid    = 0;
    ents   = 32;
    ent_sz = 1024;

    cunit_mempool_assert_int ("xztl_mempool_create",
            xztl_mempool_create (type, tid, ents, ent_sz, NULL, NULL));
}

/* Destroy the pool created by test_mempool_create (same type/tid). */
static void test_mempool_destroy (void)
{
    uint16_t type, tid;

    type = XZTL_MEMPOOL_MCMD;
    tid  = 0;

    cunit_mempool_assert_int ("xztl_mempool_destroy",
            xztl_mempool_destroy (type, tid));
}

/* Create 8 pools concurrently, one per thread id, via OpenMP.
 * NOTE(review): these pools (tid 0..7) are never destroyed by a later test;
 * presumably xztl_mempool_exit() reclaims them — confirm. */
static void test_mempool_create_mult (void)
{
    uint16_t type, tid, ents;
    uint32_t ent_sz;

    type   = XZTL_MEMPOOL_MCMD;
    ents   = 32;
    ent_sz = 128;

#pragma omp parallel for
    for (tid = 0; tid < 8; tid++) {
        cunit_mempool_assert_int ("xztl_mempool_create",
                xztl_mempool_create (type, tid, ents, ent_sz, NULL, NULL));
    }
}

/* Exercise get/put on pool (MCMD, tid 0): drain 30 entries, touch their
 * payloads, return them, then repeat to prove entries are reusable.
 * NOTE(review): the memset dereferences ent[i]->opaque without a NULL guard;
 * if a get failed the CU_ASSERT records it but execution continues and the
 * memset would crash the runner — consider skipping NULL entries. */
static void test_mempool_get_put (void)
{
    uint16_t ent_i, ents = 30;
    uint32_t ent_sz = 128;
    struct xztl_mp_entry *ent[ents];

    /* Get entries */
    for (ent_i = 0; ent_i < ents; ent_i++) {
        ent[ent_i] = xztl_mempool_get (XZTL_MEMPOOL_MCMD, 0);
        cunit_mempool_assert_ptr ("xztl_mempool_get", ent[ent_i]);
    }

    /* Modify entry bytes */
    for (ent_i = 0; ent_i < ents; ent_i++)
        memset (ent[ent_i]->opaque, 0x0, ent_sz);

    /* Put entries */
    for (ent_i = 0; ent_i < ents; ent_i++) {
        xztl_mempool_put (ent[ent_i], XZTL_MEMPOOL_MCMD, 0);
        CU_PASS ("xztl_mempool_put");
    }

    /* Repeat the process */
    for (ent_i = 0; ent_i < ents; ent_i++) {
        ent[ent_i] = xztl_mempool_get (XZTL_MEMPOOL_MCMD, 0);
        cunit_mempool_assert_ptr ("xztl_mempool_get", ent[ent_i]);
    }

    for (ent_i = 0; ent_i < ents; ent_i++)
        memset (ent[ent_i]->opaque, 0x0, ent_sz);

    for (ent_i = 0; ent_i < ents; ent_i++) {
        xztl_mempool_put (ent[ent_i], XZTL_MEMPOOL_MCMD, 0);
        CU_PASS ("xztl_mempool_put");
    }
}

/* Tear down the mempool and media modules initialized in test_mempool_init. */
static void test_mempool_exit (void)
{
    cunit_mempool_assert_int ("xztl_mempool_exit", xztl_mempool_exit ());
    cunit_mempool_assert_int ("xztl_media_exit", xztl_media_exit ());
}

/* Entry point: argv[1] is the device path. Returns the number of failed
 * tests (0 on full success), or a CUnit error code on framework failure. */
int main (int argc, const char **argv)
{
    int failed;

    if (argc < 2) {
        printf ("Please provide the device path. e.g. liou:/dev/nvme0n2\n");
        return -1;
    }

    devname = &argv[1];
    printf ("Device: %s\n", *devname);

    CU_pSuite pSuite = NULL;

    if (CUE_SUCCESS != CU_initialize_registry())
        return CU_get_error();

    pSuite = CU_add_suite("Suite_mempool", cunit_mempool_init, cunit_mempool_exit);
    if (pSuite == NULL) {
        CU_cleanup_registry();
        return CU_get_error();
    }

    /* Test registration order matters: init first, module shutdown last. */
    if ((CU_add_test (pSuite, "Initialize",
                      test_mempool_init) == NULL) ||
        (CU_add_test (pSuite, "Create a mempool",
                      test_mempool_create) == NULL) ||
        (CU_add_test (pSuite, "Destroy a mempool",
                      test_mempool_destroy) == NULL) ||
        (CU_add_test (pSuite, "Create parallel mempools",
                      test_mempool_create_mult) == NULL) ||
        (CU_add_test (pSuite, "Get and put entries",
                      test_mempool_get_put) == NULL) ||
        (CU_add_test (pSuite, "Closes the module",
                      test_mempool_exit) == NULL)){
        CU_cleanup_registry();
        return CU_get_error();
    }

    CU_basic_set_mode(CU_BRM_VERBOSE);
    CU_basic_run_tests();
    failed = CU_get_number_of_tests_failed();
    CU_cleanup_registry();

    return failed;
}
/* ==== file: vector.c ==== */
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Member functions for hypre_Vector class.
 *
 *****************************************************************************/

#include "seq_mv.h"

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCreate
 *
 * Allocate a vector header on the host; the data array itself is allocated
 * lazily by hypre_SeqVectorInitialize*.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
   hypre_Vector *vector;

   vector = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);

   hypre_VectorData(vector) = NULL;
   hypre_VectorSize(vector) = size;

   hypre_VectorNumVectors(vector) = 1;
   hypre_VectorMultiVecStorageMethod(vector) = 0;

   /* set defaults */
   hypre_VectorOwnsData(vector) = 1;

   /* data memory location defaults to the global hypre handle's setting */
   hypre_VectorMemoryLocation(vector) = hypre_HandleMemoryLocation(hypre_handle());

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultiVectorCreate
 *
 * Same as hypre_SeqVectorCreate, but with num_vectors logical columns.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   hypre_Vector *vector = hypre_SeqVectorCreate(size);
   hypre_VectorNumVectors(vector) = num_vectors;
   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorDestroy
 *
 * Free the data array only when this vector owns it (see CloneShallow),
 * then free the header. Safe on a NULL vector.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
   HYPRE_Int ierr=0;

   if (vector)
   {
      HYPRE_MemoryLocation memory_location = hypre_VectorMemoryLocation(vector);

      if ( hypre_VectorOwnsData(vector) )
      {
         hypre_TFree(hypre_VectorData(vector), memory_location);
      }

      hypre_TFree(vector, HYPRE_MEMORY_HOST);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInitialize
 *
 * Allocate the data array (if not already set) in `memory_location` and set
 * the multivector strides. Returns nonzero on an unknown storage method.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int  size = hypre_VectorSize(vector);
   HYPRE_Int  ierr = 0;
   HYPRE_Int  num_vectors = hypre_VectorNumVectors(vector);
   HYPRE_Int  multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector);

   hypre_VectorMemoryLocation(vector) = memory_location;

   /* Caveat: for pre-existing data, the memory location must be guaranteed
    * to be consistent with `memory_location'
    * Otherwise, mismatches will exist and problems will be encountered
    * when being used, and freed */
   if ( !hypre_VectorData(vector) )
   {
      hypre_VectorData(vector) = hypre_CTAlloc(HYPRE_Complex, num_vectors*size, memory_location);
   }

   if ( multivec_storage_method == 0 )
   {
      /* method 0: column-major — vectors are contiguous blocks of length size */
      hypre_VectorVectorStride(vector) = size;
      hypre_VectorIndexStride(vector)  = 1;
   }
   else if ( multivec_storage_method == 1 )
   {
      /* method 1: row-major (interleaved) — component i of every vector adjacent */
      hypre_VectorVectorStride(vector) = 1;
      hypre_VectorIndexStride(vector)  = num_vectors;
   }
   else
   {
      ++ierr;
   }

   return ierr;
}

HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
   HYPRE_Int ierr;

   ierr = hypre_SeqVectorInitialize_v2( vector, hypre_VectorMemoryLocation(vector) );

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetDataOwner
 *
 * Flag whether Destroy should free the data array (nonzero => owns it).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
                             HYPRE_Int     owns_data   )
{
   HYPRE_Int    ierr=0;

   hypre_VectorOwnsData(vector) = owns_data;

   return ierr;
}

/*--------------------------------------------------------------------------
 * ReadVector
 *
 * Read "<size>" then size values from a text file into a new host vector.
 * NOTE(review): the fopen/fscanf results are not checked — a missing file
 * dereferences a NULL FILE*; consider validating before use.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
   hypre_Vector  *vector;

   FILE    *fp;

   HYPRE_Complex *data;
   HYPRE_Int      size;

   HYPRE_Int      j;

   /*----------------------------------------------------------
    * Read in the data
    *----------------------------------------------------------*/

   fp = fopen(file_name, "r");

   hypre_fscanf(fp, "%d", &size);

   vector = hypre_SeqVectorCreate(size);

   hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST;

   hypre_SeqVectorInitialize(vector);

   data = hypre_VectorData(vector);
   for (j = 0; j < size; j++)
   {
      hypre_fscanf(fp, "%le", &data[j]);
   }

   fclose(fp);

   /* multivector code not written yet */
   hypre_assert( hypre_VectorNumVectors(vector) == 1 );

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorPrint
 *
 * Write the vector to a text file; multivectors are printed one logical
 * vector at a time using the stored strides.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
                      char         *file_name )
{
   FILE    *fp;

   HYPRE_Complex *data;
   HYPRE_Int      size, num_vectors, vecstride, idxstride;

   HYPRE_Int      i, j;
   HYPRE_Complex  value;

   HYPRE_Int      ierr = 0;

   num_vectors = hypre_VectorNumVectors(vector);
   vecstride = hypre_VectorVectorStride(vector);
   idxstride = hypre_VectorIndexStride(vector);

   /*----------------------------------------------------------
    * Print in the data
    *----------------------------------------------------------*/

   data = hypre_VectorData(vector);
   size = hypre_VectorSize(vector);

   fp = fopen(file_name, "w");

   if ( hypre_VectorNumVectors(vector) == 1 )
   {
      hypre_fprintf(fp, "%d\n", size);
   }
   else
   {
      hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
   }

   if ( num_vectors>1 )
   {
      for ( j=0; j<num_vectors; ++j )
      {
         hypre_fprintf(fp, "vector %d\n", j );
         for (i = 0; i < size; i++)
         {
            value = data[ j*vecstride + i*idxstride ];
#ifdef HYPRE_COMPLEX
            hypre_fprintf(fp, "%.14e , %.14e\n",
                          hypre_creal(value), hypre_cimag(value));
#else
            hypre_fprintf(fp, "%.14e\n", value);
#endif
         }
      }
   }
   else
   {
      for (i = 0; i < size; i++)
      {
#ifdef HYPRE_COMPLEX
         hypre_fprintf(fp, "%.14e , %.14e\n",
                       hypre_creal(data[i]), hypre_cimag(data[i]));
#else
         hypre_fprintf(fp, "%.14e\n", data[i]);
#endif
      }
   }

   fclose(fp);

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetConstantValues
 *
 * Fill every entry (all num_vectors columns) with `value`; runs on device
 * via Thrust or OpenMP-offload when those backends are enabled.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
                                  HYPRE_Complex value )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);
   HYPRE_Int      ierr  = 0;

   size *= hypre_VectorNumVectors(v);

   //hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA)
   HYPRE_THRUST_CALL( fill_n, vector_data, size, value );
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      vector_data[i] = value;
   }
#endif /* defined(HYPRE_USING_CUDA) */

   hypre_SyncCudaComputeStream(hypre_handle());

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetRandomValues
 *
 * returns vector of values randomly distributed between -1.0 and +1.0
 * (generated on the host; copied to the device when data lives there).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
                                HYPRE_Int     seed )
{
   HYPRE_Complex *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);
   HYPRE_Int      i;
   HYPRE_Int      ierr  = 0;

   hypre_SeedRand(seed);
   size *= hypre_VectorNumVectors(v);

   if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) == hypre_MEMORY_HOST)
   {
      /* RDF: threading this loop may cause problems because of hypre_Rand() */
      for (i = 0; i < size; i++)
      {
         vector_data[i] = 2.0 * hypre_Rand() - 1.0;
      }
   }
   else
   {
      /* generate on host, then copy to the vector's memory location */
      HYPRE_Complex *h_data = hypre_TAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
      for (i = 0; i < size; i++)
      {
         h_data[i] = 2.0 * hypre_Rand() - 1.0;
      }
      hypre_TMemcpy(vector_data, h_data, HYPRE_Complex, size, hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST);
      hypre_TFree(h_data, HYPRE_MEMORY_HOST);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCopy
 * copies data from x to y
 * if size of x is larger than y only the first size_y elements of x are
 * copied to y
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
                     hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int ierr = 0;

   /* copies min(size_x, size_y) entries per column of x */
   size_t size = hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) ) * hypre_VectorNumVectors(x);

   hypre_TMemcpy( hypre_VectorData(y),
                  hypre_VectorData(x),
                  HYPRE_Complex,
                  size,
                  hypre_VectorMemoryLocation(y),
                  hypre_VectorMemoryLocation(x) );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneDeep
 * Returns a complete copy of x - a deep copy, with its own copy of the data.
 *--------------------------------------------------------------------------*/

hypre_Vector*
hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int      size          = hypre_VectorSize(x);
   HYPRE_Int      num_vectors   = hypre_VectorNumVectors(x);

   hypre_Vector *y = hypre_SeqMultiVectorCreate( size, num_vectors );

   hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);

   hypre_SeqVectorInitialize_v2(y, memory_location);
   hypre_SeqVectorCopy( x, y );

   return y;
}

hypre_Vector*
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
   return hypre_SeqVectorCloneDeep_v2(x, hypre_VectorMemoryLocation(x));
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneShallow
 * Returns a complete copy of x - a shallow copy, pointing the data of x
 *
 * The clone does NOT own the data (Destroy on it will not free x's data);
 * Initialize only sets strides here since data is already non-NULL.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
   HYPRE_Int      size          = hypre_VectorSize(x);
   HYPRE_Int      num_vectors   = hypre_VectorNumVectors(x);

   hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );

   hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);

   hypre_VectorMemoryLocation(y) = hypre_VectorMemoryLocation(x);

   hypre_VectorData(y) = hypre_VectorData(x);
   hypre_SeqVectorSetDataOwner( y, 0 );
   hypre_SeqVectorInitialize(y);

   return y;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorScale
 *
 * y := alpha * y, over all columns; uses cuBLAS/Thrust on the device.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorScale( HYPRE_Complex alpha,
                      hypre_Vector *y     )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(y);
   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(y);

   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, y_data, 1) );
#else
   HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 );
#endif
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] *= alpha;
   }
#endif /* defined(HYPRE_USING_CUDA) */

   hypre_SyncCudaComputeStream(hypre_handle());

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorAxpy
 *
 * y := alpha * x + y, over all columns; uses cuBLAS/Thrust on the device.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
                     hypre_Vector *x,
                     hypre_Vector *y     )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, x_data, 1, y_data, 1) );
#else
   HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] += alpha * x_data[i];
   }
#endif /* defined(HYPRE_USING_CUDA) */

   hypre_SyncCudaComputeStream(hypre_handle());

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInnerProd
 *
 * Returns <x, y> = sum conj(y[i]) * x[i] over all columns.
 *--------------------------------------------------------------------------*/

HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
                          hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Real     result = 0.0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()), size, x_data, 1, y_data, 1, &result) );
#else
   result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif
#else
   /* TODO */
#error "Complex inner product"
#endif
#else /* #if defined(HYPRE_USING_CUDA) */
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      result += hypre_conj(y_data[i]) * x_data[i];
   }
#endif /* defined(HYPRE_USING_CUDA) */

   hypre_SyncCudaComputeStream(hypre_handle());

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return result;
}

//TODO

/*--------------------------------------------------------------------------
 * hypre_VectorSumElts:
 * Returns the sum of all vector elements.
 * NOTE(review): unlike the BLAS1 routines above, this does not multiply
 * size by num_vectors — only the first column of a multivector is summed.
 *--------------------------------------------------------------------------*/

HYPRE_Complex
hypre_SeqVectorSumElts( hypre_Vector *vector )
{
   HYPRE_Complex  sum = 0;
   HYPRE_Complex *data = hypre_VectorData( vector );
   HYPRE_Int      size = hypre_VectorSize( vector );
   HYPRE_Int      i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for ( i=0; i<size; ++i ) sum += data[i];

   return sum;
}

/* Prefetch the data array to `memory_location` (unified memory builds only).
 * Returns 1 when the vector's data is not in unified device memory. */
HYPRE_Int
hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location)
{
   HYPRE_Int ierr = 0;
#ifdef HYPRE_USING_UNIFIED_MEMORY
   if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE)
   {
      /* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified momory\n");*/
      return 1;
   }

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Int      size   = hypre_VectorSize(x) * hypre_VectorNumVectors(x);

   if (size == 0)
   {
      return ierr;
   }

   hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location);
#endif

   return ierr;
}

#if 0
/* y[i] = max(alpha*x[i], beta*y[i])
 * NOTE(review): dead code (#if 0). If revived: the host loop uses `+=`
 * where the documented contract is assignment, and `mx` is declared
 * unconditionally although thrust is only available in CUDA builds. */
HYPRE_Int
hypre_SeqVectorMax( HYPRE_Complex alpha,
                    hypre_Vector *x,
                    HYPRE_Complex beta,
                    hypre_Vector *y     )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

   thrust::maximum<HYPRE_Complex> mx;

#if defined(HYPRE_USING_CUDA)
   HYPRE_THRUST_CALL( transform,
                      thrust::make_transform_iterator(x_data,        alpha * _1),
                      thrust::make_transform_iterator(x_data + size, alpha * _1),
                      thrust::make_transform_iterator(y_data,        beta  * _1),
                      y_data,
                      mx );
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] += hypre_max(alpha * x_data[i], beta * y_data[i]);
   }
#endif /* defined(HYPRE_USING_CUDA) */

   hypre_SyncCudaComputeStream(hypre_handle());

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}
#endif
/* ==== file: openmp-2dheat.c ==== */
/*
 * B. Estrade <estrabd@cs.uh.edu>
 * Original coding: Spring 2004
 * Serialized: Summer 2010
 * Wrapped into MPI/OpenMP 2dheat Suite: Summer 2010
 * OpenMP added: ..not yet! :)
 *
 * Serial implementation of 2d heat conduction
 * finite difference over a rectangular domain using:
 * - Jacobi
 * - Gauss-Seidel
 * - SOR
 *
 * This code was created by eliminating the MPI include and
 * API function calls from the parallel version in order to
 * serve as a starting point for inserting OpenMP directives.
 *
 * Many "rank"/"PE" artifacts from the MPI version remain; in this
 * serialized code p == 1 and my_rank == 0 always, so each rank helper
 * degenerates to covering the whole grid.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      T_SRC0 @ X - (W/2,H)
        |
  *******X*******
  *.............*
  *.............*
  *.............*
  *.............*
  *.............*   ~ 0.0 @ all bdy by "X" (W/2,H)
  *.............*
  *.............*
  *.............*
  *.............*
  *.............*
  ***************
  2D domain - WIDTH x HEIGHT
  "X" = T_SRC0
  "*" = 0.0
  "." = internal node suceptible to heating
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#define _WIDTH   50
#define _HEIGHT  50
#define H        1.0
#define _EPSILON 0.1
/* methods: 1 - jacobi 2 - gauss-seidel 3 - sor */
#define _METHOD  2
#define ITERMAX  10
#define T_SRC0   550.0
#define ROOT     0

/* Includes */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>

/* declare functions */
int   get_start           (int rank);
int   get_end             (int rank);
int   get_num_rows        (int rank);
void  init_domain         (float ** domain_ptr,int rank);
void  jacobi              (float ** current_ptr,float ** next_ptr);
void  gauss_seidel        (float ** current_ptr,float ** next_ptr);
void  sor                 (float ** current_ptr,float ** next_ptr);
float get_val_par         (float * above_ptr,float ** domain_ptr,float * below_ptr,int rank,int i,int j);
void  enforce_bc_par      (float ** domain_ptr,int rank,int i,int j);
int   global_to_local     (int rank, int row);
float f                   (int i,int j);
float get_convergence_sqd (float ** current_ptr,float ** next_ptr,int rank);

/* declare and set globals */
int WIDTH=_WIDTH;
int HEIGHT=_HEIGHT;
int meth=_METHOD;
int num_threads;
float EPSILON=_EPSILON;

/* Function pointer to solver method of choice
 * (K&R-style empty parameter list — both call sites pass (float**,float**)) */
void (*method) ();

/* Entry point: parse options, allocate the two grids, iterate the chosen
 * solver until the L2 norm of the update drops below EPSILON, then report
 * timing. Exit status is EXIT_SUCCESS. */
int main(int argc, char** argv)
{
    /* NOTE(review): `time` holds wall-clock microseconds-of-day in an int;
     * it wraps after ~35 min of elapsed time and at day boundaries. */
    int p,my_rank,time;
    /* arrays used to contain each PE's rows - specify cols, no need to spec rows */
    float **U_Curr;
    float **U_Next;
    /* helper variables */
    float convergence,convergence_sqd,local_convergence_sqd;
    /* available iterator */
    int i,j,k,m,n;
    int per_proc,remainder,my_start_row,my_end_row,my_num_rows;
    int verbose = 0;
    int show_time = 0;
    /* for timings */
    struct timeval tv;
    struct timezone tz;
    struct tm *tm;

    /* artifacts of original serialization from MPI version */
    p = 1;
    my_rank = 0;

    /* argument processing done by everyone */
    int c,errflg;
    extern char *optarg;
    extern int optind, optopt;
    while ((c = getopt(argc, argv, "e:h:m:tw:v")) != -1) {
        switch(c) {
            case 'e':
                EPSILON = atof(optarg);
                break;
            case 'h':
                HEIGHT = atoi(optarg);
                break;
            case 'm':
                /* selects the numerical methods */
                switch(atoi(optarg)) {
                    case 1: /* jacobi */
                        meth = 1;
                        break;
                    case 2: /* gauss-seidel */
                        meth = 2;
                        break;
                    case 3: /* sor */
                        meth = 3;
                        break;
                }
                break;
            case 't':
                show_time++; /* overridden by -v (verbose) */
                break;
            case 'w':
                WIDTH = atoi(optarg);
                break;
            case 'v':
                verbose++;
                break;
            /* handle bad arguments */
            case ':': /* -h or -w without operand */
                if (ROOT == my_rank)
                    fprintf(stderr,"Option -%c requires an operand\n", optopt);
                errflg++;
                break;
            case '?':
                if (ROOT == my_rank)
                    fprintf(stderr,"Unrecognized option: -%c\n", optopt);
                errflg++;
                break;
        }
    }
    /* if (0 < errflg) exit(EXIT_FAILURE); */

    /* wait for user to input runtime params */
    //MPI_Barrier(MPI_COMM_WORLD);

    /* broadcast method to use */
    //(void) MPI_Bcast(&meth,1,MPI_INT,0,MPI_COMM_WORLD);
    switch (meth) {
        case 1:
            method = &jacobi;
            break;
        case 2:
            method = &gauss_seidel;
            break;
        case 3:
            method = &sor;
            break;
    }

    /* let each processor decide what rows(s) it owns */
    my_start_row = get_start(my_rank);
    my_end_row = get_end(my_rank);
    my_num_rows = get_num_rows(my_rank);
    if ( 0 < verbose )
        printf("proc %d contains (%d) rows %d to %d\n",my_rank,my_num_rows,my_start_row,my_end_row);
    fflush(stdout);

    /* allocate 2d array: one contiguous data block plus a row-pointer table */
    U_Curr = (float**)malloc(sizeof(float*)*my_num_rows);
    U_Curr[0] = (float*)malloc(sizeof(float)*my_num_rows*(int)floor(WIDTH/H));
    for (i=1;i<my_num_rows;i++) {
        U_Curr[i] = U_Curr[i-1]+(int)floor(WIDTH/H);
    }

    /* allocate 2d array */
    U_Next = (float**)malloc(sizeof(float*)*my_num_rows);
    U_Next[0] = (float*)malloc(sizeof(float)*my_num_rows*(int)floor(WIDTH/H));
    for (i=1;i<my_num_rows;i++) {
        U_Next[i] = U_Next[i-1]+(int)floor(WIDTH/H);
    }

    /* initialize global grid */
    init_domain(U_Curr,my_rank);
    init_domain(U_Next,my_rank);

    /* iterate for solution */
    if (my_rank == ROOT) {
        gettimeofday(&tv, &tz);
        tm=localtime(&tv.tv_sec);
        time = 1000000*(tm->tm_hour * 3600 + tm->tm_min * 60 + tm->tm_sec) + tv.tv_usec;
    }
    k = 1;
    num_threads = 0;
    //while (1) {
    for(;;) {
        method(U_Curr,U_Next);

        local_convergence_sqd = get_convergence_sqd(U_Curr,U_Next,my_rank);
        //MPI_Reduce(&local_convergence_sqd,&convergence_sqd,1,MPI_FLOAT,MPI_SUM,ROOT,MPI_COMM_WORLD);
        convergence_sqd = local_convergence_sqd;
        if (my_rank == ROOT) {
            convergence = sqrt(convergence_sqd);
            if (verbose == 1) {
                printf("L2 = %f\n",convergence);
                fflush(stdout);
            }
        }

        /* broadcast method to use */
        //(void) MPI_Bcast(&convergence,1,MPI_INT,0,MPI_COMM_WORLD);
        if (convergence <= EPSILON) {
            break;
        }

        /* copy U_Next to U_Curr */
        for (j=my_start_row;j<=my_end_row;j++) {
            for (i=0;i<(int)floor(WIDTH/H);i++) {
                U_Curr[j-my_start_row][i] = U_Next[j-my_start_row][i];
            }
        }
        k++;
        //MPI_Barrier(MPI_COMM_WORLD);
    }

    /* say something at the end */
    if (my_rank == ROOT) {
        gettimeofday(&tv, &tz);
        tm=localtime(&tv.tv_sec);
        time = 1000000*(tm->tm_hour * 3600 + tm->tm_min * 60 + tm->tm_sec) + tv.tv_usec - time;
        if (0 < verbose) {
            printf("Estimated time to convergence in %d iterations using %d processors on a %dx%d grid is %d microseconds\n",k,p,(int)floor(WIDTH/H),(int)floor(HEIGHT/H),time);
        } else if (show_time) {
            printf("% 5d\t% 12d msec\n",omp_get_max_threads(),time);
        } /* else show nothing */
    }

    exit(EXIT_SUCCESS);
    //return 0; not needed; not reached
}

/* used by each PE to compute the sum of the squared diffs between
 * current iteration and previous */
float get_convergence_sqd (float ** current_ptr,float ** next_ptr,int rank)
{
    int i,j,my_start,my_end,my_num_rows;
    float sum;

    my_start = get_start(rank);
    my_end = get_end(rank);
    my_num_rows = get_num_rows(rank);

    sum = 0.0;
    for (j=my_start;j<=my_end;j++) {
        for (i=0;i<(int)floor(WIDTH/H);i++) {
            sum += pow(next_ptr[global_to_local(rank,j)][i]-current_ptr[global_to_local(rank,j)][i],2);
        }
    }
    return sum;
}

/* implements parallel jacobi methods:
 * every point of next_ptr is the average of its four current_ptr neighbors
 * (minus the source term), so the two row passes are independent. */
void jacobi (float ** current_ptr,float ** next_ptr)
{
    int i,j,p,my_rank,my_start,my_end,my_num_rows;
    float U_Curr_Above[(int)floor(WIDTH/H)]; /* 1d array holding values from bottom row of PE above */
    float U_Curr_Below[(int)floor(WIDTH/H)]; /* 1d array holding values from top row of PE below */

    p = 1;
    my_rank = 0;
    my_start = get_start(my_rank);
    my_end = get_end(my_rank);
    my_num_rows = get_num_rows(my_rank);

   #pragma omp parallel default(none) private(i,j) \
    shared(p,my_rank,U_Curr_Above,U_Curr_Below,WIDTH,my_start,my_end,next_ptr,current_ptr)
  {
    /* Jacobi method using global addressing */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
        for (i=0;i<(int)floor(WIDTH/H);i++) {
            next_ptr[j-my_start][i] =
                .25*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i-1,j)
                   + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i+1,j)
                   + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j-1)
                   + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j+1)
                   - (pow(H,2)*f(i,j)));
            enforce_bc_par(next_ptr,my_rank,i,j);
        }
    }
  } //end omp parallel region
}

/* implements parallel g-s method with W == 1.0 (red/black ordering:
 * odd (i+j) points first from current values, then even points using the
 * freshly computed odd values in next_ptr). */
void gauss_seidel (float ** current_ptr,float ** next_ptr)
{
    int i,j,p,my_rank,my_start,my_end,my_num_rows;
    float U_Curr_Above[(int)floor(WIDTH/H)]; /* 1d array holding values from bottom row of PE above */
    float U_Curr_Below[(int)floor(WIDTH/H)]; /* 1d array holding values from top row of PE below */
    float W = 1.0;

    p = 1;
    my_rank = 0;
    my_start = get_start(my_rank);
    my_end = get_end(my_rank);
    my_num_rows = get_num_rows(my_rank);

   #pragma omp parallel default(none) private(i,j) \
    shared(W,p,my_rank,U_Curr_Above,U_Curr_Below,WIDTH,my_start,my_end,next_ptr,current_ptr)
  {
    /* solve next reds (i+j odd) */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
        for (i=0;i<(int)floor(WIDTH/H);i++) {
            if ((i+j)%2 != 0) {
                next_ptr[j-my_start][i] =
                    get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j)
                  + (W/4)*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i-1,j)
                         + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i+1,j)
                         + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j-1)
                         + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j+1)
                         - 4*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j))
                         - (pow(H,2)*f(i,j)));
                enforce_bc_par(next_ptr,my_rank,i,j);
            }
        }
    }
    /* solve next blacks (i+j) even .... using next reds */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
        for (i=0;i<(int)floor(WIDTH/H);i++) {
            if ((i+j)%2 == 0) {
                next_ptr[j-my_start][i] =
                    get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j)
                  + (W/4)*(get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i-1,j)
                         + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i+1,j)
                         + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j-1)
                         + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j+1)
                         - 4*(get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j))
                         - (pow(H,2)*f(i,j)));
                enforce_bc_par(next_ptr,my_rank,i,j);
            }
        }
    }
  } //end omp parallel region
}

/* implements parallels sor method: identical structure to gauss_seidel but
 * with over-relaxation factor W == 1.5. */
void sor (float ** current_ptr,float ** next_ptr)
{
    int i,j,p,my_rank,my_start,my_end,my_num_rows;
    float U_Curr_Above[(int)floor(WIDTH/H)]; /* 1d array holding values from bottom row of PE above */
    float U_Curr_Below[(int)floor(WIDTH/H)]; /* 1d array holding values from top row of PE below */
    float W = 1.5;

    p = 1;
    my_rank = 0;
    my_start = get_start(my_rank);
    my_end = get_end(my_rank);
    my_num_rows = get_num_rows(my_rank);

   #pragma omp parallel default(none) private(i,j) \
    shared(W,p,my_rank,U_Curr_Above,U_Curr_Below,WIDTH,my_start,my_end,next_ptr,current_ptr)
  {
    #pragma omp for schedule(runtime)
    /* solve next reds (i+j odd) */
    for (j=my_start;j<=my_end;j++) {
        for (i=0;i<(int)floor(WIDTH/H);i++) {
            if ((i+j)%2 != 0) {
                next_ptr[j-my_start][i] =
                    get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j)
                  + (W/4)*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i-1,j)
                         + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i+1,j)
                         + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j-1)
                         + get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j+1)
                         - 4*(get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j))
                         - (pow(H,2)*f(i,j)));
                enforce_bc_par(next_ptr,my_rank,i,j);
            }
        }
    }
    /* solve next blacks (i+j) even .... using next reds */
    #pragma omp for schedule(runtime)
    for (j=my_start;j<=my_end;j++) {
        for (i=0;i<(int)floor(WIDTH/H);i++) {
            if ((i+j)%2 == 0) {
                next_ptr[j-my_start][i] =
                    get_val_par(U_Curr_Above,current_ptr,U_Curr_Below,my_rank,i,j)
                  + (W/4)*(get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i-1,j)
                         + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i+1,j)
                         + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j-1)
                         + get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j+1)
                         - 4*(get_val_par(U_Curr_Above,next_ptr,U_Curr_Below,my_rank,i,j))
                         - (pow(H,2)*f(i,j)));
                enforce_bc_par(next_ptr,my_rank,i,j);
            }
        }
    }
  } //end omp parallel region
}

/* enforces bcs in in serial and parallel:
 * pins the heat-source node to T_SRC0 and all boundary nodes to 0.0.
 * NOTE(review): the source branch indexes domain_ptr[j][i] directly while
 * the boundary branch uses global_to_local(rank,j); identical for rank 0
 * in this serialized version, but inconsistent — confirm before re-MPI-fying. */
void enforce_bc_par (float ** domain_ptr,int rank,int i,int j)
{
    /* enforce bc's first */
    if(i == ((int)floor(WIDTH/H/2)-1) && j == 0) {
        /* This is the heat source location */
        domain_ptr[j][i] = T_SRC0;
    } else if (i <= 0 || j <= 0 || i >= ((int)floor(WIDTH/H)-1) || j >= ((int)floor(HEIGHT/H)-1)) {
        /* All edges and beyond are set to 0.0 */
        domain_ptr[global_to_local(rank,j)][i] = 0.0;
    }
}

/* returns appropriate values for requested i,j:
 * boundary/source values are produced directly; interior values come from
 * the local domain or, in the MPI original, from the ghost rows. */
float get_val_par (float * above_ptr,float ** domain_ptr,float * below_ptr,int rank,int i,int j)
{
    float ret_val;
    int p;

    /* artifact from original serialization of MPI version */
    p = 1;

    /* enforce bc's first */
    if(i == ((int)floor(WIDTH/H/2)-1) && j == 0) {
        /* This is the heat source location */
        ret_val = T_SRC0;
    } else if (i <= 0 || j <= 0 || i >= ((int)floor(WIDTH/H)-1) || j >= ((int)floor(HEIGHT/H)-1)) {
        /* All edges and beyond are set to 0.0 */
        ret_val = 0.0;
    } else {
        /* Else, return value for matrix supplied or ghost rows */
        if (j < get_start(rank)) {
            if (rank == ROOT) {
                /* not interested in above ghost row */
                ret_val = 0.0;
            } else {
                ret_val = above_ptr[i];
                /*printf("%d: Used ghost (%d,%d) row from above = %f\n",rank,i,j,above_ptr[i]);
                  fflush(stdout);*/
            }
        } else if (j > get_end(rank)) {
            if (rank == (p-1)) {
                /* not interested in below ghost row */
                ret_val = 0.0;
            } else {
                ret_val = below_ptr[i];
                /*printf("%d: Used ghost (%d,%d) row from below = %f\n",rank,i,j,below_ptr[i]);
                  fflush(stdout);*/
            }
        } else {
            /* else, return the value in the domain asked for */
            ret_val = domain_ptr[global_to_local(rank,j)][i];
            /*printf("%d: Used real (%d,%d) row from self = %f\n",rank,i,global_to_local(rank,j),domain_ptr[global_to_local(rank,j)][i]);
              fflush(stdout);*/
        }
    }
    return ret_val;
}

/* initialized domain to 0.0 - could be where grid file is read in.
 * NOTE(review): the loop runs j < end, but get_end() is inclusive
 * (get_num_rows == 1 + end - start), so the final owned row is never
 * initialized here — looks like an off-by-one; confirm intent. */
void init_domain (float ** domain_ptr,int rank)
{
    int i,j,start,end,rows;
    start = get_start(rank);
    end = get_end(rank);
    rows = get_num_rows(rank);
    for (j=start;j<end;j++) {
        for (i=0;i<(int)floor(WIDTH/H);i++) {
            domain_ptr[j-start][i] = 0.0;
        }
    }
}

/* computes start row for given PE: even split with the remainder rows
 * distributed to the lowest-ranked PEs. */
int get_start (int rank)
{
    /* computer row divisions to each proc */
    int p,per_proc,start_row,remainder;
    /* artifact of serialization of orignal MPI version */
    p = 1;
    /* get initial whole divisor */
    per_proc = (int)floor(HEIGHT/H)/p;
    /* get number of remaining */
    remainder = (int)floor(HEIGHT/H)%p;
    /* there is a remainder, then it distribute it to the first "remainder" procs */
    if (rank < remainder) {
        start_row = rank * (per_proc + 1);
    } else {
        start_row = rank * (per_proc) + remainder;
    }
    return start_row;
}

/* computes end row for given PE (inclusive). */
int get_end (int rank)
{
    /* computer row divisions to each proc */
    int p,per_proc,remainder,end_row;
    /* artifact of serialization of orignal MPI version */
    p = 1;
    per_proc = (int)floor(HEIGHT/H)/p;
    remainder = (int)floor(HEIGHT/H)%p;
    if (rank < remainder) {
        end_row = get_start(rank) + per_proc;
    } else {
        end_row = get_start(rank) + per_proc - 1;
    }
    return end_row;
}

/* calcs number of rows for given PE (inclusive range, hence the +1) */
int get_num_rows (int rank)
{
    return 1 + get_end(rank) - get_start(rank);
}

/* converts a global row index into this PE's local row index */
int global_to_local (int rank, int row)
{
    return row - get_start(rank);
}

/*
 * f - function that would be non zero if there was an internal heat source
 */
float f (int
i,int j) { return 0.0; }
viterbi_decode_op.h
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under
the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or
agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and
limitations under the License. */

// CPU kernel and helper functors for the ViterbiDecode operator: finds the
// highest-scoring tag sequence for each batch item of a linear-chain CRF.

#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/operators/controlflow/compare_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_functor.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/operators/unique_op.h"
#include "paddle/phi/kernels/funcs/gather.h"
#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#endif

namespace paddle {
namespace operators {

// Row-wise argmax along `axis`: writes the max value into `out` and its index
// into `out_idx`, treating the tensor as (pre, n, post).
template <typename DeviceContext, typename T, typename IndType>
struct Argmax {
  void operator()(const framework::ExecutionContext& ctx,
                  const framework::Tensor& input, framework::Tensor* out_idx,
                  framework::Tensor* out, int axis) {
    framework::DDim input_dims = input.dims();
    int64_t pre = 1;
    int64_t post = 1;
    int64_t n = input_dims[axis];
    for (int i = 0; i < axis; i++) {
      pre *= input_dims[i];
    }
    for (int i = axis + 1; i < input_dims.size(); i++) {
      post *= input_dims[i];
    }
    int64_t height = pre * post;
    int64_t width = n;
    const T* in_data = input.data<T>();
    IndType* out_idx_data = out_idx->data<IndType>();
    T* out_data = out->data<T>();
    // Reduce
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
    for (int64_t i = 0; i < height; ++i) {
      int64_t h = i / post;
      int64_t w = i % post;
      IndType max_idx = -1;
      T max_value = (std::numeric_limits<T>::lowest)();  // for windows compile
      for (int64_t j = 0; j < width; ++j) {
        if (in_data[h * width * post + j * post + w] > max_value) {
          max_value = in_data[h * width * post + j * post + w];
          max_idx = j;
        }
      }
      out_data[i] = max_value;
      out_idx_data[i] = max_idx;
    }
  }
};

// Fills data[i] = i * scale for i in [0, end); used to build batch offsets.
template <typename DeviceContext>
struct ARange {
  void operator()(const DeviceContext& dev_ctx, int64_t* data, int end,
                  int64_t scale) {
    for (int i = 0; i < end; ++i) {
      data[i] = i * scale;
    }
  }
};

// Scans the whole tensor and returns its maximum element through *max_value.
template <typename DeviceContext, typename T>
struct GetMaxValue {
  void operator()(const DeviceContext& dev_ctx, const framework::Tensor& input,
                  T* max_value) {
    auto input_ptr = input.data<T>();
    auto num = input.numel();
    *max_value = *std::max_element(input_ptr, input_ptr + num);
  }
};

// Thin wrapper over phi's CPU gather.
template <typename DeviceContext, typename T, typename IndexT = int>
struct Gather {
  void operator()(const DeviceContext& ctx, const framework::Tensor& src,
                  const framework::Tensor& index, framework::Tensor* output) {
    phi::funcs::CPUGather<T, IndexT>(ctx, src, index, output);
  }
};

// Element-wise binary op for tensors of identical shape; out may alias lhs.
template <typename T, typename Functor, typename OutT = T>
void SameDimsBinaryOP(const framework::Tensor& lhs,
                      const framework::Tensor& rhs, framework::Tensor* out) {
  const T* lhs_ptr = lhs.data<T>();
  const T* rhs_ptr = rhs.data<T>();
  OutT* out_ptr = out->data<OutT>();
  Functor functor;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < out->numel(); ++i) {
    out_ptr[i] = functor(lhs_ptr[i], rhs_ptr[i]);
  }
}

// Produces a 0/1 mask of type T from comparing two int64 tensors element-wise.
template <typename DeviceContext,
          template <typename InT, typename OutT> typename CompareFunctor,
          typename T>
struct GetMask {
  void operator()(const framework::ExecutionContext& ctx,
                  const framework::Tensor& lhs, const framework::Tensor& rhs,
                  framework::Tensor* mask) {
    SameDimsBinaryOP<int64_t, CompareFunctor<int64_t, T>, T>(lhs, rhs, mask);
  }
};

// Maps a flat output index to flat lhs/rhs indices under broadcasting.
// Primary template (multi-threaded path): stateless per-index computation via
// division by output strides, safe to call from parallel iterations.
template <bool is_multi_threads>
struct GetInputIndex {
  void operator()(const std::vector<int>& lhs_dims,
                  const std::vector<int>& rhs_dims,
                  const std::vector<int>& output_dims,
                  const std::vector<int>& lhs_strides,
                  const std::vector<int>& rhs_strides,
                  const std::vector<int>& output_strides, int output_idx,
                  int* index_array, int* lhs_idx, int* rhs_idx) {
    int out_dims_size = output_strides.size();
    for (int j = 0; j < out_dims_size; ++j) {
      int curr_idx = output_idx / output_strides[j];
      output_idx %= output_strides[j];
      *lhs_idx += (lhs_dims[j] > 1) ? curr_idx * lhs_strides[j] : 0;
      *rhs_idx += (rhs_dims[j] > 1) ? curr_idx * rhs_strides[j] : 0;
    }
  }
};

// Single-threaded specialization: advances a shared index_array incrementally,
// which is cheaper but stateful (hence not usable under OpenMP).
template <>
struct GetInputIndex<false> {
  void operator()(const std::vector<int>& lhs_dims,
                  const std::vector<int>& rhs_dims,
                  const std::vector<int>& output_dims,
                  const std::vector<int>& lhs_strides,
                  const std::vector<int>& rhs_strides,
                  const std::vector<int>& output_strides, int output_idx,
                  int* index_array, int* lhs_idx, int* rhs_idx) {
    int out_dims_size = output_strides.size();
    *lhs_idx =
        phi::funcs::GetElementwiseIndex(lhs_dims.data(), out_dims_size,
                                        index_array);
    *rhs_idx =
        phi::funcs::GetElementwiseIndex(rhs_dims.data(), out_dims_size,
                                        index_array);
    phi::funcs::UpdateElementwiseIndexArray(output_dims.data(), out_dims_size,
                                            index_array);
  }
};

// Broadcasting binary op; assumes lhs/rhs/out already have equal rank
// (dims of size 1 broadcast).
template <typename T, typename Functor, bool is_multi_threads = false>
void SimpleBroadcastBinaryOP(const framework::Tensor& lhs,
                             const framework::Tensor& rhs,
                             framework::Tensor* out) {
  const T* lhs_ptr = lhs.data<T>();
  const T* rhs_ptr = rhs.data<T>();
  T* out_ptr = out->data<T>();
  int out_size = static_cast<int>(out->dims().size());
  std::vector<int> out_dims(out_size);
  std::vector<int> lhs_dims(out_size);
  std::vector<int> rhs_dims(out_size);
  std::copy(lhs.dims().Get(), lhs.dims().Get() + out_size, lhs_dims.data());
  std::copy(rhs.dims().Get(), rhs.dims().Get() + out_size, rhs_dims.data());
  std::copy(out->dims().Get(), out->dims().Get() + out_size, out_dims.data());
  std::vector<int> output_strides(out_size, 1);
  std::vector<int> lhs_strides(out_size, 1);
  std::vector<int> rhs_strides(out_size, 1);
  std::vector<int> index_array(out_size, 0);
  // calculate strides
  for (int i = out_size - 2; i >= 0; --i) {
    output_strides[i] = output_strides[i + 1] * out_dims[i + 1];
    lhs_strides[i] = lhs_strides[i + 1] * lhs_dims[i + 1];
    rhs_strides[i] = rhs_strides[i + 1] * rhs_dims[i + 1];
  }
  Functor functor;
  GetInputIndex<is_multi_threads> get_input_index;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < out->numel(); ++i) {
    int lhs_idx = 0;
    int rhs_idx = 0;
    get_input_index(lhs_dims, rhs_dims, out_dims, lhs_strides, rhs_strides,
                    output_strides, i, index_array.data(), &lhs_idx, &rhs_idx);
    out_ptr[i] = functor(lhs_ptr[lhs_idx], rhs_ptr[rhs_idx]);
  }
}

// Dispatches to the same-dims fast path or the broadcasting path (threaded or
// not, depending on the available OpenMP threads).
template <typename DeviceContext, template <typename T> typename BinaryFunctor,
          typename T>
struct BinaryOperation {
  void operator()(const DeviceContext& dev_ctx, const framework::Tensor& lhs,
                  const framework::Tensor& rhs, framework::Tensor* output) {
    if (lhs.dims() == rhs.dims()) {
      SameDimsBinaryOP<T, BinaryFunctor<T>>(lhs, rhs, output);
    } else {
      bool is_multi_threads = false;
#ifdef PADDLE_WITH_MKLML
      if (omp_get_max_threads() > 1) {
        is_multi_threads = true;
      }
#endif
      if (is_multi_threads) {
        SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, true>(lhs, rhs, output);
      } else {
        SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, false>(lhs, rhs, output);
      }
    }
  }
};

// Arena-style sub-allocator over one flat LoDTensor: GetBufferBlock carves
// successive slices so the kernel avoids repeated allocations. No bounds
// check — callers must size the backing buffer correctly up front.
class TensorBuffer {
 public:
  explicit TensorBuffer(const framework::LoDTensor& in)
      : buffer_(in), offset_(0) {
    buffer_.Resize({buffer_.numel()});
  }
  framework::Tensor GetBufferBlock(std::initializer_list<int64_t> shape) {
    int64_t size = std::accumulate(shape.begin(), shape.end(), 1,
                                   std::multiplies<int64_t>());
    framework::Tensor block = buffer_.Slice(offset_, offset_ + size);
    offset_ += size;
    block.Resize(shape);
    return block;
  }

 private:
  framework::LoDTensor buffer_;  // need to resize 1-D Tensor
  int offset_;
};

// Viterbi decode: forward pass accumulates per-tag best scores (alpha) and
// per-step argmax history, then a backward pass gathers the best path.
template <typename DeviceContext, typename T>
class ViterbiDecodeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    bool include_bos_eos_tag = ctx.Attr<bool>("include_bos_eos_tag");
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto curr_place = ctx.GetPlace();
    // Input is (batch, seq, n_labels) emission scores.
    auto* input = ctx.Input<framework::Tensor>("Input");
    auto batch_size = static_cast<int>(input->dims()[0]);
    auto seq_len = static_cast<int>(input->dims()[1]);
    auto n_labels = static_cast<int>(input->dims()[2]);
    phi::funcs::SetConstant<DeviceContext, T> float_functor;
    phi::funcs::SetConstant<DeviceContext, int64_t> int_functor;
    std::vector<framework::Tensor> historys;
    // We create tensor buffer in order to avoid allocating memory frequently
    // 10 means allocate 10*batch_size bytes memory, such as int_mask, zero...
    int buffer_size = batch_size * (n_labels + 1) * seq_len + 10 * batch_size;
    framework::LoDTensor int_buffer;
    int_buffer.Resize(phi::make_ddim({buffer_size}));
    int_buffer.mutable_data<int64_t>(ctx.GetPlace());
    TensorBuffer int_tensor_buffer(int_buffer);
    // create float tensor buffer
    // 10 means allocate 10*batch_size*n_labels bytes, such as alpha, alpha_max
    buffer_size = batch_size * (seq_len + 10) * n_labels +
                  (batch_size + 2) * n_labels * n_labels;
    framework::LoDTensor float_buffer;
    float_buffer.Resize(phi::make_ddim({buffer_size}));
    float_buffer.mutable_data<T>(ctx.GetPlace());
    TensorBuffer float_tensor_buffer(float_buffer);
    // left_length counts remaining steps per batch item; decremented each step.
    auto* length = ctx.Input<framework::Tensor>("Length");
    framework::Tensor left_length =
        int_tensor_buffer.GetBufferBlock({batch_size, 1});
    framework::TensorCopy(*length, curr_place, dev_ctx, &left_length);
    int64_t max_seq_len = 0;
    GetMaxValue<DeviceContext, int64_t> get_max_value;
    get_max_value(dev_ctx, left_length, &max_seq_len);
    auto* scores = ctx.Output<framework::Tensor>("Scores");
    scores->mutable_data<T>(curr_place);
    auto* path = ctx.Output<framework::Tensor>("Path");
    path->Resize({batch_size, max_seq_len});
    path->mutable_data<int64_t>(curr_place);
    // tpath is built time-major and transposed into `path` at the end.
    framework::Tensor tpath =
        int_tensor_buffer.GetBufferBlock({max_seq_len, batch_size});
    auto batch_path = Unbind(tpath);
    for (auto it = batch_path.begin(); it != batch_path.end(); ++it) {
      it->Resize({batch_size});
    }
    // create and init required tensor
    framework::Tensor input_exp =
        float_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
    TransCompute<DeviceContext, T>(3, dev_ctx, *input, &input_exp, {1, 0, 2});
    auto* transition = ctx.Input<framework::Tensor>("Transition");
    framework::Tensor trans_exp =
        float_tensor_buffer.GetBufferBlock({n_labels, n_labels});
    framework::TensorCopy(*transition, curr_place, dev_ctx, &trans_exp);
    trans_exp.Resize({1, n_labels, n_labels});
    framework::Tensor alpha =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    framework::Tensor zero = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    int_functor(dev_ctx, &zero, 0);
    framework::Tensor one = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    int_functor(dev_ctx, &one, 1);
    framework::Tensor float_one =
        float_tensor_buffer.GetBufferBlock({batch_size, 1});
    float_functor(dev_ctx, &float_one, static_cast<T>(1.0));
    framework::Tensor alpha_trn_sum =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels, n_labels});
    framework::Tensor alpha_max =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    framework::Tensor alpha_argmax =
        int_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
    auto alpha_argmax_unbind = Unbind(alpha_argmax);
    framework::Tensor alpha_nxt =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    framework::Tensor int_mask = int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor zero_len_mask =
        int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor float_mask =
        float_tensor_buffer.GetBufferBlock({batch_size, 1});
    framework::Tensor stop_trans =
        float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
    framework::Tensor start_trans =
        float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
    framework::Tensor rest_trans =
        float_tensor_buffer.GetBufferBlock({1, n_labels - 2, n_labels});
    framework::Tensor last_ids = int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor last_ids_tmp =
        int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor batch_offset =
        int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor gather_idx =
        int_tensor_buffer.GetBufferBlock({batch_size});
    // Split the transition matrix rows into rest / stop / start blocks
    // (start and stop tags are assumed to be the last two rows).
    std::vector<const framework::Tensor*> shape{&rest_trans, &stop_trans,
                                                &start_trans};
    std::vector<framework::Tensor*> outputs{&rest_trans, &stop_trans,
                                            &start_trans};
    math::SplitFunctor<DeviceContext, T> split_functor;
    split_functor(dev_ctx, trans_exp, shape, 1, &outputs);
    stop_trans.Resize({1, n_labels});
    start_trans.Resize({1, n_labels});
    auto logit0 = input_exp.Slice(0, 1);
    logit0.Resize({batch_size, n_labels});
    BinaryOperation<DeviceContext, AddFunctor, T> AddFloat;
    BinaryOperation<DeviceContext, AddFunctor, int64_t> AddInt;
    BinaryOperation<DeviceContext, MulFunctor, T> MulFloat;
    BinaryOperation<DeviceContext, MulFunctor, int64_t> MulInt;
    BinaryOperation<DeviceContext, SubFunctor, T> SubFloat;
    BinaryOperation<DeviceContext, SubFunctor, int64_t> SubInt;
    // Initialize alpha with the first emission (+ start/stop transitions when
    // BOS/EOS tags are included).
    if (include_bos_eos_tag) {
      AddFloat(dev_ctx, logit0, start_trans, &alpha);
      GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
                                                &float_mask);
      MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
      AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
    } else {
      alpha = logit0;
    }
    SubInt(dev_ctx, left_length, one, &left_length);
    Argmax<DeviceContext, T, int64_t> argmax;
    // Forward recursion over time steps.
    for (int64_t i = 1; i < max_seq_len; ++i) {
      framework::Tensor logit = input_exp.Slice(i, i + 1);
      logit.Resize({batch_size, n_labels});
      framework::Tensor& alpha_exp = alpha.Resize({batch_size, n_labels, 1});
      AddFloat(dev_ctx, alpha_exp, trans_exp, &alpha_trn_sum);
      auto alpha_argmax_temp = alpha_argmax_unbind[i - 1];
      alpha_argmax_temp.Resize({batch_size, n_labels});
      argmax(ctx, alpha_trn_sum, &alpha_argmax_temp, &alpha_max, 1);
      historys.emplace_back(alpha_argmax_temp);
      AddFloat(dev_ctx, alpha_max, logit, &alpha_nxt);
      alpha.Resize({batch_size, n_labels});
      // mask = paddle.cast((left_length > 0), dtype='float32')
      // alpha = mask * alpha_nxt + (1 - mask) * alpha
      GetMask<DeviceContext, GreaterThanFunctor, T>()(ctx, left_length, zero,
                                                      &float_mask);
      // alpha_nxt = mask * alpha_nxt
      MulFloat(dev_ctx, alpha_nxt, float_mask, &alpha_nxt);
      // inv_mask = 1 - mask
      SubFloat(dev_ctx, float_one, float_mask, &float_mask);
      // alpha = (1 - mask) * alpha
      MulFloat(dev_ctx, alpha, float_mask, &alpha);
      // alpha += alpha_nxt
      AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
      if (include_bos_eos_tag) {
        GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
                                                  &float_mask);
        // alpha += mask * trans_exp[:, self.stop_idx]
        MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
        AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
      }
      SubInt(dev_ctx, left_length, one, &left_length);
    }
    argmax(ctx, alpha, &last_ids, scores, 1);
    left_length.Resize({batch_size});
    GetMask<DeviceContext, GreaterEqualFunctor, int64_t>()(ctx, left_length,
                                                           zero, &int_mask);
    // last_ids_update = last_ids * tag_mask
    int last_ids_index = 1;
    int actual_len = (std::min)(seq_len, static_cast<int>(max_seq_len));
    MulInt(dev_ctx, last_ids, int_mask,
           &batch_path[actual_len - last_ids_index]);
    // The algorithm below can refer to
    // https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/layers/crf.py#L438
    ARange<DeviceContext> arange;
    arange(dev_ctx, batch_offset.data<int64_t>(), batch_size, n_labels);
    Gather<DeviceContext, int64_t, int64_t> gather;
    // Backward pass: walk the argmax history from the last step to the first,
    // handling per-item sequence lengths via the left_length masks.
    for (auto hist = historys.rbegin(); hist != historys.rend(); ++hist) {
      ++last_ids_index;
      AddInt(dev_ctx, left_length, one, &left_length);
      AddInt(dev_ctx, batch_offset, last_ids, &gather_idx);
      framework::Tensor& last_ids_update =
          batch_path[actual_len - last_ids_index];
      hist->Resize({batch_size * n_labels});
      gather(dev_ctx, *hist, gather_idx, &last_ids_update);
      GetMask<DeviceContext, GreaterThanFunctor, int64_t>()(ctx, left_length,
                                                            zero, &int_mask);
      MulInt(dev_ctx, last_ids_update, int_mask, &last_ids_update);
      GetMask<DeviceContext, EqualFunctor, int64_t>()(ctx, left_length, zero,
                                                      &zero_len_mask);
      MulInt(dev_ctx, last_ids, zero_len_mask, &last_ids_tmp);
      SubInt(dev_ctx, one, zero_len_mask, &zero_len_mask);
      MulInt(dev_ctx, last_ids_update, zero_len_mask, &last_ids_update);
      AddInt(dev_ctx, last_ids_update, last_ids_tmp, &last_ids_update);
      GetMask<DeviceContext, LessThanFunctor, int64_t>()(ctx, left_length, zero,
                                                         &int_mask);
      MulInt(dev_ctx, last_ids, int_mask, &last_ids);
      AddInt(dev_ctx, last_ids_update, last_ids, &last_ids);
    }
    TransCompute<DeviceContext, int64_t>(2, dev_ctx, tpath, path, {1, 0});
  }
};

}  // namespace operators
}  // namespace paddle
GB_unop__identity_uint32_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_fp64)
// op(A') function: GB (_unop_tran__identity_uint32_fp64)
// C type: uint32_t
// A type: double
// cast: uint32_t cij = GB_cast_to_uint32_t ((double) (aij))
// unaryop: cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each double entry of A to uint32_t and stores it in Cx; the bitmap
// branch skips entries whose Ab[p] is zero (not present).
GrB_Info GB (_unop_apply__identity_uint32_fp64)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose template uses the GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__identity_uint32_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
update_ops_matrix_dense_double.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "utility.h"
#include "update_ops.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

#ifdef _USE_SIMD
/* AVX variants specialized by how close the target qubits are to qubit 0. */
void double_qubit_dense_matrix_gate_simd_high(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim);
void double_qubit_dense_matrix_gate_simd_middle(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim);
void double_qubit_dense_matrix_gate_simd_low(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim);
#endif

/*
 * Applies a dense 4x4 matrix to the two target qubits of a state vector of
 * dimension dim, dispatching to the SIMD or scalar implementation.
 * NOTE(review): small states force single-threaded OpenMP, and afterwards the
 * thread count is reset with omp_set_num_threads(omp_get_max_threads()) —
 * this clobbers any smaller thread count the caller had set; confirm intent.
 */
void double_qubit_dense_matrix_gate_c(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE matrix[16], CTYPE *state, ITYPE dim) {
#ifdef _OPENMP
	UINT threshold = 12;  /* below 2^12 amplitudes, threading overhead dominates */
	if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1);
#endif

#ifdef _USE_SIMD
	double_qubit_dense_matrix_gate_simd(target_qubit_index1, target_qubit_index2, matrix, state, dim);
#else
	double_qubit_dense_matrix_gate_nosimd(target_qubit_index1, target_qubit_index2, matrix, state, dim);
#endif

#ifdef _OPENMP
	omp_set_num_threads(omp_get_max_threads());
#endif
}

/*
 * Scalar reference implementation: for each of dim/4 index groups, gathers
 * the four amplitudes addressed by the two target-qubit bits and replaces
 * them with the 4x4 matrix-vector product. The low/mid/high masks rebuild a
 * full state index from a compressed loop index by opening gaps at the two
 * target bit positions.
 */
void double_qubit_dense_matrix_gate_nosimd(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE matrix[16], CTYPE *state, ITYPE dim) {
	const UINT min_qubit_index = get_min_ui(target_qubit_index1, target_qubit_index2);
	const UINT max_qubit_index = get_max_ui(target_qubit_index1, target_qubit_index2);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
	const ITYPE low_mask = min_qubit_mask - 1;
	const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
	const ITYPE high_mask = ~(max_qubit_mask - 1);
	const ITYPE target_mask1 = 1ULL << target_qubit_index1;
	const ITYPE target_mask2 = 1ULL << target_qubit_index2;

	// loop variables
	const ITYPE loop_dim = dim / 4;
	ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		// create index with zeros inserted at both target bit positions
		ITYPE basis_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2);

		// gather index
		ITYPE basis_1 = basis_0 + target_mask1;
		ITYPE basis_2 = basis_0 + target_mask2;
		ITYPE basis_3 = basis_1 + target_mask2;

		// fetch values
		CTYPE cval_0 = state[basis_0];
		CTYPE cval_1 = state[basis_1];
		CTYPE cval_2 = state[basis_2];
		CTYPE cval_3 = state[basis_3];

		// set values (rows of the 4x4 matrix times the gathered amplitudes)
		state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1 + matrix[2] * cval_2 + matrix[3] * cval_3;
		state[basis_1] = matrix[4] * cval_0 + matrix[5] * cval_1 + matrix[6] * cval_2 + matrix[7] * cval_3;
		state[basis_2] = matrix[8] * cval_0 + matrix[9] * cval_1 + matrix[10] * cval_2 + matrix[11] * cval_3;
		state[basis_3] = matrix[12] * cval_0 + matrix[13] * cval_1 + matrix[14] * cval_2 + matrix[15] * cval_3;
	}
}

#ifdef _USE_SIMD
/*
 * AVX2 implementation for the case where both target qubits are >= 2, so the
 * four amplitudes of a block are at least 4 doubles apart and full 256-bit
 * loads never straddle a target-bit boundary.
 * NOTE(review): vec/mat are reinterpreted as double* — assumes CTYPE is a
 * complex double with interleaved (re,im) layout; confirm against constant.h.
 */
void double_qubit_dense_matrix_gate_simd_high(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim) {
	assert(target_qubit_index1 >= 2);
	assert(target_qubit_index2 >= 2);
	const UINT min_qubit_index = get_min_ui(target_qubit_index1, target_qubit_index2);
	const UINT max_qubit_index = get_max_ui(target_qubit_index1, target_qubit_index2);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
	const ITYPE low_mask = min_qubit_mask - 1;
	const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
	const ITYPE high_mask = ~(max_qubit_mask - 1);
	const ITYPE target_mask1_shift = 1ULL << (target_qubit_index1 + 1);
	const ITYPE target_mask2_shift = 1ULL << (target_qubit_index2 + 1);

	// loop variables
	const ITYPE loop_dim = dim / 4;
	ITYPE state_index;
	double* ptr_vec = (double*)vec;
	const double* ptr_mat = (const double*)mat;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (state_index = 0; state_index < loop_dim; state_index += 4) {
__m256d res_real_sum, res_imag_sum; __m256d vec_before, vec_after; __m256d vec_real00, vec_imag00; __m256d vec_real01, vec_imag01; __m256d vec_real10, vec_imag10; __m256d vec_real11, vec_imag11; __m256d dup_mr, dup_mi; // create index ITYPE basis00 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2); // shited due to index from complex -> double basis00 = basis00 << 1; ITYPE basis01 = basis00 + target_mask1_shift; ITYPE basis10 = basis00 + target_mask2_shift; ITYPE basis11 = basis01 + target_mask2_shift; //// Pick 4 complex values from basis00 vec_before = _mm256_loadu_pd(ptr_vec + basis00); // (i1 r1 i0 r0) vec_after = _mm256_loadu_pd(ptr_vec + basis00 + 4); // (i3 r3 i2 r2) //// Split real values and imag values via shuffle vec_real00 = _mm256_shuffle_pd(vec_before, vec_after, 0); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (r3 r1 r2 r0) 0000 = 0 vec_imag00 = _mm256_shuffle_pd(vec_before, vec_after, 15); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (i3 i1 i2 i0) 1111 = 15 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[0]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[1]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_mul_pd(vec_real00, dup_mr); res_real_sum = _mm256_fnmadd_pd(vec_imag00, dup_mi, res_real_sum); // -a*b+c res_imag_sum = _mm256_mul_pd(vec_real00, dup_mi); res_imag_sum = _mm256_fmadd_pd(vec_imag00, dup_mr, res_imag_sum); // a*b+c //// Pick 4 complex values from basis01 vec_before = _mm256_loadu_pd(ptr_vec + basis01); // (i1 r1 i0 r0) vec_after = _mm256_loadu_pd(ptr_vec + basis01 + 4); // (i3 r3 i2 r2) //// Split real values and imag values via shuffle vec_real01 = _mm256_shuffle_pd(vec_before, vec_after, 0); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (r3 r1 r2 r0) 0000 = 0 vec_imag01 = _mm256_shuffle_pd(vec_before, vec_after, 15); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (i3 i1 i2 i0) 1111 = 15 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[2]); // (mr0 mr0 mr0 mr0) dup_mi = 
_mm256_set1_pd(ptr_mat[3]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real01, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag01, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real01, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag01, dup_mr, res_imag_sum); // a*b+c //// Pick 4 complex values from basis10 vec_before = _mm256_loadu_pd(ptr_vec + basis10); // (i1 r1 i0 r0) vec_after = _mm256_loadu_pd(ptr_vec + basis10 + 4); // (i3 r3 i2 r2) //// Split real values and imag values via shuffle vec_real10 = _mm256_shuffle_pd(vec_before, vec_after, 0); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (r3 r1 r2 r0) 0000 = 0 vec_imag10 = _mm256_shuffle_pd(vec_before, vec_after, 15); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (i3 i1 i2 i0) 1111 = 15 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[4]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[5]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real10, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag10, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real10, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag10, dup_mr, res_imag_sum); // a*b+c //// Pick 4 complex values from basis11 vec_before = _mm256_loadu_pd(ptr_vec + basis11); // (i1 r1 i0 r0) vec_after = _mm256_loadu_pd(ptr_vec + basis11 + 4); // (i3 r3 i2 r2) //// Split real values and imag values via shuffle vec_real11 = _mm256_shuffle_pd(vec_before, vec_after, 0); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (r3 r1 r2 r0) 0000 = 0 vec_imag11 = _mm256_shuffle_pd(vec_before, vec_after, 15); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (i3 i1 i2 i0) 1111 = 15 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[6]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[7]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = 
_mm256_fmadd_pd(vec_real11, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag11, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real11, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag11, dup_mr, res_imag_sum); // a*b+c //// Store vec_before = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 0); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r1 i1 i0 r0) 0000 = 0 vec_after = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 15); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r3 i3 i2 r2) 1111 = 15 _mm256_storeu_pd(ptr_vec + basis00, vec_before); _mm256_storeu_pd(ptr_vec + basis00 + 4, vec_after); // vector is already fetched, fetch successive matrix elements and perform dot(vec,vec) for other basis //// basis01 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[8]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[9]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_mul_pd(vec_real00, dup_mr); res_real_sum = _mm256_fnmadd_pd(vec_imag00, dup_mi, res_real_sum); // -a*b+c res_imag_sum = _mm256_mul_pd(vec_real00, dup_mi); res_imag_sum = _mm256_fmadd_pd(vec_imag00, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[10]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[11]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real01, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag01, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real01, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag01, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[12]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[13]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real10, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag10, dup_mi, res_real_sum); 
//-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real10, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag10, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[14]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[15]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real11, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag11, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real11, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag11, dup_mr, res_imag_sum); // a*b+c //// Store vec_before = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 0); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r1 i1 i0 r0) 0000 = 0 vec_after = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 15); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r3 i3 i2 r2) 1111 = 15 _mm256_storeu_pd(ptr_vec + basis01, vec_before); _mm256_storeu_pd(ptr_vec + basis01 + 4, vec_after); //// basis10 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[16]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[17]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_mul_pd(vec_real00, dup_mr); res_real_sum = _mm256_fnmadd_pd(vec_imag00, dup_mi, res_real_sum); // -a*b+c res_imag_sum = _mm256_mul_pd(vec_real00, dup_mi); res_imag_sum = _mm256_fmadd_pd(vec_imag00, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[18]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[19]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real01, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag01, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real01, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag01, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[20]); 
// (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[21]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real10, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag10, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real10, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag10, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[22]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[23]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real11, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag11, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real11, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag11, dup_mr, res_imag_sum); // a*b+c //// Store vec_before = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 0); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r1 i1 i0 r0) 0000 = 0 vec_after = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 15); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r3 i3 i2 r2) 1111 = 15 _mm256_storeu_pd(ptr_vec + basis10, vec_before); _mm256_storeu_pd(ptr_vec + basis10 + 4, vec_after); //// basis11 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[24]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[25]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_mul_pd(vec_real00, dup_mr); res_real_sum = _mm256_fnmadd_pd(vec_imag00, dup_mi, res_real_sum); // -a*b+c res_imag_sum = _mm256_mul_pd(vec_real00, dup_mi); res_imag_sum = _mm256_fmadd_pd(vec_imag00, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[26]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[27]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real01, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag01, 
dup_mi, res_real_sum);                                              //-a*b+c
res_imag_sum = _mm256_fmadd_pd(vec_real01, dup_mi, res_imag_sum);   // a*b+c
res_imag_sum = _mm256_fmadd_pd(vec_imag01, dup_mr, res_imag_sum);   // a*b+c
//// Pick matrix elem with 4 dup
dup_mr = _mm256_set1_pd(ptr_mat[28]);  // (mr mr mr mr)
dup_mi = _mm256_set1_pd(ptr_mat[29]);  // (mi mi mi mi)
//// Compute real and imag part
res_real_sum = _mm256_fmadd_pd(vec_real10, dup_mr, res_real_sum);   // a*b+c
res_real_sum = _mm256_fnmadd_pd(vec_imag10, dup_mi, res_real_sum);  //-a*b+c
res_imag_sum = _mm256_fmadd_pd(vec_real10, dup_mi, res_imag_sum);   // a*b+c
res_imag_sum = _mm256_fmadd_pd(vec_imag10, dup_mr, res_imag_sum);   // a*b+c
//// Pick matrix elem with 4 dup
dup_mr = _mm256_set1_pd(ptr_mat[30]);  // (mr mr mr mr)
dup_mi = _mm256_set1_pd(ptr_mat[31]);  // (mi mi mi mi)
//// Compute real and imag part
res_real_sum = _mm256_fmadd_pd(vec_real11, dup_mr, res_real_sum);   // a*b+c
res_real_sum = _mm256_fnmadd_pd(vec_imag11, dup_mi, res_real_sum);  //-a*b+c
res_imag_sum = _mm256_fmadd_pd(vec_real11, dup_mi, res_imag_sum);   // a*b+c
res_imag_sum = _mm256_fmadd_pd(vec_imag11, dup_mr, res_imag_sum);   // a*b+c
//// Store: re-interleave the separated real/imag lanes back into complex order
vec_before = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 0);   // (r3 r1 r2 r0)(i3 i1 i2 i0) -> (i1 r1 i0 r0) : imm 0000 = 0
vec_after = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 15);   // (r3 r1 r2 r0)(i3 i1 i2 i0) -> (i3 r3 i2 r2) : imm 1111 = 15
_mm256_storeu_pd(ptr_vec + basis11, vec_before);
_mm256_storeu_pd(ptr_vec + basis11 + 4, vec_after);
}
}

/* Apply a dense 4x4 (two-qubit) matrix gate with AVX2 when BOTH target
 * qubit indices are 0 or 1.  The 8 affected complex amplitudes of each
 * group are contiguous, so each loop iteration loads/stores 16 doubles.
 * The two branches differ only in which gathered register (u2/u3) pairs
 * with which matrix column, encoding the two possible qubit orderings. */
void double_qubit_dense_matrix_gate_simd_low(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim) {
    assert(target_qubit_index1 < 2);
    assert(target_qubit_index2 < 2);
    assert(dim >= 8);
    // loop variables (loop_dim counts doubles: dim complex values * 2)
    const ITYPE loop_dim = dim * 2;
    ITYPE state_index;
    double* ptr_vec = (double*)vec;
    const double* ptr_mat = (const double*)mat;
    if (target_qubit_index1 < target_qubit_index2) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (state_index = 0; state_index < loop_dim; state_index += 16) {
            __m256d vec1, vec2, vec3, vec4;
            __m256d u1, u2, u3, u4, u1f, u2f, u3f, u4f;
            __m256d mr, mi;
            // Gather the 8 amplitudes c0..c7 into per-basis registers u1..u4.
            vec1 = _mm256_loadu_pd(ptr_vec + state_index);       // c1 c0
            vec1 = _mm256_permute4x64_pd(vec1, 78);   // (c1 c0) -> (c0 c1) : order 1032 -> imm 0b01001110 = 78
            vec2 = _mm256_loadu_pd(ptr_vec + state_index + 4);   // c3 c2
            vec2 = _mm256_permute4x64_pd(vec2, 78);   // (c3 c2) -> (c2 c3) : imm 78 (NOTE: old comment said 46)
            vec3 = _mm256_loadu_pd(ptr_vec + state_index + 8);   // c5 c4
            u1 = _mm256_blend_pd(vec1, vec3, 3);      // (c0 c1) (c5 c4) -> (c0 c4) : 0011 = 3
            u2 = _mm256_blend_pd(vec1, vec3, 12);     // (c0 c1) (c5 c4) -> (c5 c1) : 1100 = 12
            u2 = _mm256_permute4x64_pd(u2, 78);       // (c5 c1) -> (c1 c5)
            vec4 = _mm256_loadu_pd(ptr_vec + state_index + 12);  // c7 c6
            u3 = _mm256_blend_pd(vec2, vec4, 3);      // (c2 c3) (c7 c6) -> (c2 c6) : 0011 = 3
            u4 = _mm256_blend_pd(vec2, vec4, 12);     // (c2 c3) (c7 c6) -> (c7 c3) : 1100 = 12
            u4 = _mm256_permute4x64_pd(u4, 78);       // (c7 c3) -> (c3 c7)
            // uKf = uK with real/imag swapped inside each 128-bit lane (order 2301 -> imm 177)
            u1f = _mm256_permute4x64_pd(u1, 177);
            u2f = _mm256_permute4x64_pd(u2, 177);
            u3f = _mm256_permute4x64_pd(u3, 177);
            u4f = _mm256_permute4x64_pd(u4, 177);
            // u1 = (c0i c0r c4i c4r)  u2 = (c1i c1r c5i c5r)
            // u3 = (c2i c2r c6i c6r)  u4 = (c3i c3r c7i c7r)
            // u1f = (c0r c0i c4r c4i) u2f = (c1r c1i c5r c5i)
            // u3f = (c2r c2i c6r c6i) u4f = (c3r c3i c7r c7i)
            __m256d res_u1, res_u2, res_u3, res_u4, tmp_inv;
            // fmaddsub alternately adds/subtracts; tmp_inv fixes the sign of
            // the real components at the end of each accumulation chain.
            tmp_inv = _mm256_set_pd(1, -1, 1, -1);
            // Row 0 of the matrix (ptr_mat[0..7] = 4 complex entries).
            mr = _mm256_set1_pd(ptr_mat[0]);
            res_u1 = _mm256_mul_pd(mr, u1);                 // c0i*m0r, -c0r*m0r
            mi = _mm256_set1_pd(ptr_mat[1]);
            res_u1 = _mm256_fmaddsub_pd(mi, u1f, res_u1);   // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r
            mr = _mm256_set1_pd(ptr_mat[2]);
            res_u1 = _mm256_fmaddsub_pd(mr, u2, res_u1);
            mi = _mm256_set1_pd(ptr_mat[3]);
            res_u1 = _mm256_fmaddsub_pd(mi, u2f, res_u1);
            mr = _mm256_set1_pd(ptr_mat[4]);
            res_u1 = _mm256_fmaddsub_pd(mr, u3, res_u1);
            mi = _mm256_set1_pd(ptr_mat[5]);
            res_u1 = _mm256_fmaddsub_pd(mi, u3f, res_u1);
            mr = _mm256_set1_pd(ptr_mat[6]);
            res_u1 = _mm256_fmaddsub_pd(mr, u4, res_u1);
            mi = _mm256_set1_pd(ptr_mat[7]);
            res_u1 = _mm256_fmaddsub_pd(mi, u4f, res_u1);
            res_u1 = _mm256_mul_pd(res_u1, tmp_inv);
            // Row 1 (ptr_mat[8..15]).
            mr = _mm256_set1_pd(ptr_mat[8]);
            res_u2 = _mm256_mul_pd(mr, u1);
            mi = _mm256_set1_pd(ptr_mat[9]);
            res_u2 = _mm256_fmaddsub_pd(mi, u1f, res_u2);
            mr = _mm256_set1_pd(ptr_mat[10]);
            res_u2 = _mm256_fmaddsub_pd(mr, u2, res_u2);
            mi = _mm256_set1_pd(ptr_mat[11]);
            res_u2 = _mm256_fmaddsub_pd(mi, u2f, res_u2);
            mr = _mm256_set1_pd(ptr_mat[12]);
            res_u2 = _mm256_fmaddsub_pd(mr, u3, res_u2);
            mi = _mm256_set1_pd(ptr_mat[13]);
            res_u2 = _mm256_fmaddsub_pd(mi, u3f, res_u2);
            mr = _mm256_set1_pd(ptr_mat[14]);
            res_u2 = _mm256_fmaddsub_pd(mr, u4, res_u2);
            mi = _mm256_set1_pd(ptr_mat[15]);
            res_u2 = _mm256_fmaddsub_pd(mi, u4f, res_u2);
            res_u2 = _mm256_mul_pd(res_u2, tmp_inv);
            // Scatter rows 0/1 back to their original memory positions.
            res_u2 = _mm256_permute4x64_pd(res_u2, 78);  // flip
            vec1 = _mm256_blend_pd(res_u1, res_u2, 3);   // blend
            vec2 = _mm256_blend_pd(res_u1, res_u2, 12);  // blend
            vec1 = _mm256_permute4x64_pd(vec1, 78);      // flip
            _mm256_storeu_pd(ptr_vec + state_index, vec1);
            _mm256_storeu_pd(ptr_vec + state_index + 8, vec2);
            // Row 2 (ptr_mat[16..23]).
            mr = _mm256_set1_pd(ptr_mat[16]);
            res_u3 = _mm256_mul_pd(mr, u1);
            mi = _mm256_set1_pd(ptr_mat[17]);
            res_u3 = _mm256_fmaddsub_pd(mi, u1f, res_u3);
            mr = _mm256_set1_pd(ptr_mat[18]);
            res_u3 = _mm256_fmaddsub_pd(mr, u2, res_u3);
            mi = _mm256_set1_pd(ptr_mat[19]);
            res_u3 = _mm256_fmaddsub_pd(mi, u2f, res_u3);
            mr = _mm256_set1_pd(ptr_mat[20]);
            res_u3 = _mm256_fmaddsub_pd(mr, u3, res_u3);
            mi = _mm256_set1_pd(ptr_mat[21]);
            res_u3 = _mm256_fmaddsub_pd(mi, u3f, res_u3);
            mr = _mm256_set1_pd(ptr_mat[22]);
            res_u3 = _mm256_fmaddsub_pd(mr, u4, res_u3);
            mi = _mm256_set1_pd(ptr_mat[23]);
            res_u3 = _mm256_fmaddsub_pd(mi, u4f, res_u3);
            res_u3 = _mm256_mul_pd(res_u3, tmp_inv);
            // Row 3 (ptr_mat[24..31]).
            mr = _mm256_set1_pd(ptr_mat[24]);
            res_u4 = _mm256_mul_pd(mr, u1);
            mi = _mm256_set1_pd(ptr_mat[25]);
            res_u4 = _mm256_fmaddsub_pd(mi, u1f, res_u4);
            mr = _mm256_set1_pd(ptr_mat[26]);
            res_u4 = _mm256_fmaddsub_pd(mr, u2, res_u4);
            mi = _mm256_set1_pd(ptr_mat[27]);
            res_u4 = _mm256_fmaddsub_pd(mi, u2f, res_u4);
            mr = _mm256_set1_pd(ptr_mat[28]);
            res_u4 = _mm256_fmaddsub_pd(mr, u3, res_u4);
            mi = _mm256_set1_pd(ptr_mat[29]);
            res_u4 = _mm256_fmaddsub_pd(mi, u3f, res_u4);
            mr = _mm256_set1_pd(ptr_mat[30]);
            res_u4 = _mm256_fmaddsub_pd(mr, u4, res_u4);
            mi = _mm256_set1_pd(ptr_mat[31]);
            res_u4 = _mm256_fmaddsub_pd(mi, u4f, res_u4);
            res_u4 = _mm256_mul_pd(res_u4, tmp_inv);
            // Scatter rows 2/3.
            res_u4 = _mm256_permute4x64_pd(res_u4, 78);  // flip
            vec3 = _mm256_blend_pd(res_u3, res_u4, 3);   // blend
            vec4 = _mm256_blend_pd(res_u3, res_u4, 12);  // blend
            vec3 = _mm256_permute4x64_pd(vec3, 78);      // flip
            _mm256_storeu_pd(ptr_vec + state_index + 4, vec3);
            _mm256_storeu_pd(ptr_vec + state_index + 12, vec4);
        }
    } else {
        // target_qubit_index1 > target_qubit_index2: identical gather/scatter,
        // but basis registers u2 and u3 trade places in the matrix products.
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (state_index = 0; state_index < loop_dim; state_index += 16) {
            __m256d vec1, vec2, vec3, vec4;
            __m256d u1, u2, u3, u4, u1f, u2f, u3f, u4f;
            __m256d mr, mi;
            vec1 = _mm256_loadu_pd(ptr_vec + state_index);       // c1 c0
            vec1 = _mm256_permute4x64_pd(vec1, 78);   // (c1 c0) -> (c0 c1)
            vec2 = _mm256_loadu_pd(ptr_vec + state_index + 4);   // c3 c2
            vec2 = _mm256_permute4x64_pd(vec2, 78);   // (c3 c2) -> (c2 c3)
            vec3 = _mm256_loadu_pd(ptr_vec + state_index + 8);   // c5 c4
            u1 = _mm256_blend_pd(vec1, vec3, 3);      // -> (c0 c4)
            u2 = _mm256_blend_pd(vec1, vec3, 12);     // -> (c5 c1)
            u2 = _mm256_permute4x64_pd(u2, 78);       // -> (c1 c5)
            vec4 = _mm256_loadu_pd(ptr_vec + state_index + 12);  // c7 c6
            u3 = _mm256_blend_pd(vec2, vec4, 3);      // -> (c2 c6)
            u4 = _mm256_blend_pd(vec2, vec4, 12);     // -> (c7 c3)
            u4 = _mm256_permute4x64_pd(u4, 78);       // -> (c3 c7)
            u1f = _mm256_permute4x64_pd(u1, 177);     // swap real/imag in-lane
            u2f = _mm256_permute4x64_pd(u2, 177);
            u3f = _mm256_permute4x64_pd(u3, 177);
            u4f = _mm256_permute4x64_pd(u4, 177);
            __m256d res_u1, res_u2, res_u3, res_u4, tmp_inv;
            tmp_inv = _mm256_set_pd(1, -1, 1, -1);
            // Row 0: note the column order u1, u3, u2, u4 (qubits swapped).
            mr = _mm256_set1_pd(ptr_mat[0]);
            res_u1 = _mm256_mul_pd(mr, u1);
            mi = _mm256_set1_pd(ptr_mat[1]);
            res_u1 = _mm256_fmaddsub_pd(mi, u1f, res_u1);
            mr = _mm256_set1_pd(ptr_mat[2]);
            res_u1 = _mm256_fmaddsub_pd(mr, u3, res_u1);
            mi = _mm256_set1_pd(ptr_mat[3]);
            res_u1 = _mm256_fmaddsub_pd(mi, u3f, res_u1);
            mr = _mm256_set1_pd(ptr_mat[4]);
            res_u1 = _mm256_fmaddsub_pd(mr, u2, res_u1);
            mi = _mm256_set1_pd(ptr_mat[5]);
            res_u1 = _mm256_fmaddsub_pd(mi, u2f, res_u1);
            mr = _mm256_set1_pd(ptr_mat[6]);
            res_u1 = _mm256_fmaddsub_pd(mr, u4, res_u1);
            mi = _mm256_set1_pd(ptr_mat[7]);
            res_u1 = _mm256_fmaddsub_pd(mi, u4f, res_u1);
            res_u1 = _mm256_mul_pd(res_u1, tmp_inv);
            // Row 2 (ptr_mat[16..23]) produced before row 1 so the first
            // store pair (offsets 0 and +8) can be issued early.
            mr = _mm256_set1_pd(ptr_mat[16]);
            res_u3 = _mm256_mul_pd(mr, u1);
            mi = _mm256_set1_pd(ptr_mat[17]);
            res_u3 = _mm256_fmaddsub_pd(mi, u1f, res_u3);
            mr = _mm256_set1_pd(ptr_mat[18]);
            res_u3 = _mm256_fmaddsub_pd(mr, u3, res_u3);
            mi = _mm256_set1_pd(ptr_mat[19]);
            res_u3 = _mm256_fmaddsub_pd(mi, u3f, res_u3);
            mr = _mm256_set1_pd(ptr_mat[20]);
            res_u3 = _mm256_fmaddsub_pd(mr, u2, res_u3);
            mi = _mm256_set1_pd(ptr_mat[21]);
            res_u3 = _mm256_fmaddsub_pd(mi, u2f, res_u3);
            mr = _mm256_set1_pd(ptr_mat[22]);
            res_u3 = _mm256_fmaddsub_pd(mr, u4, res_u3);
            mi = _mm256_set1_pd(ptr_mat[23]);
            res_u3 = _mm256_fmaddsub_pd(mi, u4f, res_u3);
            res_u3 = _mm256_mul_pd(res_u3, tmp_inv);
            res_u3 = _mm256_permute4x64_pd(res_u3, 78);  // flip
            vec1 = _mm256_blend_pd(res_u1, res_u3, 3);   // blend
            vec3 = _mm256_blend_pd(res_u1, res_u3, 12);  // blend
            vec1 = _mm256_permute4x64_pd(vec1, 78);      // flip
            _mm256_storeu_pd(ptr_vec + state_index, vec1);
            _mm256_storeu_pd(ptr_vec + state_index + 8, vec3);
            // Row 1 (ptr_mat[8..15]).
            mr = _mm256_set1_pd(ptr_mat[8]);
            res_u2 = _mm256_mul_pd(mr, u1);
            mi = _mm256_set1_pd(ptr_mat[9]);
            res_u2 = _mm256_fmaddsub_pd(mi, u1f, res_u2);
            mr = _mm256_set1_pd(ptr_mat[10]);
            res_u2 = _mm256_fmaddsub_pd(mr, u3, res_u2);
            mi = _mm256_set1_pd(ptr_mat[11]);
            res_u2 = _mm256_fmaddsub_pd(mi, u3f, res_u2);
            mr = _mm256_set1_pd(ptr_mat[12]);
            res_u2 = _mm256_fmaddsub_pd(mr, u2, res_u2);
            mi = _mm256_set1_pd(ptr_mat[13]);
            res_u2 = _mm256_fmaddsub_pd(mi, u2f, res_u2);
            mr = _mm256_set1_pd(ptr_mat[14]);
            res_u2 = _mm256_fmaddsub_pd(mr, u4, res_u2);
            mi = _mm256_set1_pd(ptr_mat[15]);
            res_u2 = _mm256_fmaddsub_pd(mi, u4f, res_u2);
            res_u2 = _mm256_mul_pd(res_u2, tmp_inv);
            // Row 3 (ptr_mat[24..31]).
            mr = _mm256_set1_pd(ptr_mat[24]);
            res_u4 = _mm256_mul_pd(mr, u1);
            mi = _mm256_set1_pd(ptr_mat[25]);
            res_u4 = _mm256_fmaddsub_pd(mi, u1f, res_u4);
            mr = _mm256_set1_pd(ptr_mat[26]);
            res_u4 = _mm256_fmaddsub_pd(mr, u3, res_u4);
            mi = _mm256_set1_pd(ptr_mat[27]);
            res_u4 = _mm256_fmaddsub_pd(mi, u3f, res_u4);
            mr = _mm256_set1_pd(ptr_mat[28]);
            res_u4 = _mm256_fmaddsub_pd(mr, u2, res_u4);
            mi = _mm256_set1_pd(ptr_mat[29]);
            res_u4 = _mm256_fmaddsub_pd(mi, u2f, res_u4);
            mr = _mm256_set1_pd(ptr_mat[30]);
            res_u4 = _mm256_fmaddsub_pd(mr, u4, res_u4);
            mi = _mm256_set1_pd(ptr_mat[31]);
            res_u4 = _mm256_fmaddsub_pd(mi, u4f, res_u4);
            res_u4 = _mm256_mul_pd(res_u4, tmp_inv);
            res_u4 = _mm256_permute4x64_pd(res_u4, 78);  // flip
            vec2 = _mm256_blend_pd(res_u2, res_u4, 3);   // blend
            vec4 = _mm256_blend_pd(res_u2, res_u4, 12);  // blend
            vec2 = _mm256_permute4x64_pd(vec2, 78);      // flip
            _mm256_storeu_pd(ptr_vec + state_index + 4, vec2);
            _mm256_storeu_pd(ptr_vec + state_index + 12, vec4);
        }
    }
}

/* Swap two complex entries of a matrix/vector in place. */
__inline void _element_swap(CTYPE* vec, UINT i1, UINT i2) {
    CTYPE temp = vec[i1];
    vec[i1] = vec[i2];
    vec[i2] = temp;
}

/* Apply a dense 4x4 matrix gate when exactly one target qubit index is < 2.
 * Works on a private copy of the matrix; if the caller passed the targets in
 * descending order, the copy is relabeled (rows/columns permuted) so the rest
 * of the routine can assume target_qubit_index1 < target_qubit_index2. */
void double_qubit_dense_matrix_gate_simd_middle(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE _mat[16], CTYPE* vec, ITYPE dim) {
    CTYPE mat[16];
    memcpy(mat, _mat, sizeof(CTYPE) * 16);
    if (target_qubit_index2 < target_qubit_index1) {
        UINT temp = target_qubit_index1;
        target_qubit_index1 = target_qubit_index2;
        target_qubit_index2 = temp;
        // permutation of the 4x4 induced by exchanging the two qubits
        _element_swap(mat, 1, 2);
        _element_swap(mat, 4, 8);
        _element_swap(mat, 7, 11);
        _element_swap(mat, 13, 14);
        _element_swap(mat, 5, 10);
        _element_swap(mat,
6, 9);
    }
    assert(target_qubit_index1 < 2);
    assert(target_qubit_index2 >= 2);
    // Bit masks used to expand the compressed loop counter into a basis
    // index with zeros at the two target-qubit bit positions.
    const UINT min_qubit_index = get_min_ui(target_qubit_index1, target_qubit_index2);
    const UINT max_qubit_index = get_max_ui(target_qubit_index1, target_qubit_index2);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);
    // +1 because indices below address doubles, not complex values
    const ITYPE target_mask1_shift = 1ULL << (target_qubit_index1 + 1);
    const ITYPE target_mask2_shift = 1ULL << (target_qubit_index2 + 1);
    // loop variables
    const ITYPE loop_dim = dim / 4;
    ITYPE state_index;
    double* ptr_vec = (double*)vec;
    const double* ptr_mat = (const double*)mat;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (state_index = 0; state_index < loop_dim; state_index+=2) {
        // create index
        ITYPE basis00 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2);
        // shifted because indexing switches from complex -> double
        basis00 = basis00 << 1;
        //ITYPE basis01 = basis00 + target_mask1_shift;
        ITYPE basis10 = basis00 + target_mask2_shift;
        //ITYPE basis11 = basis01 + target_mask2_shift;
        //// Pick 4 complex values from basis00 (and 4 more from basis10)
        __m256d vec_bef0, vec_aft0, vec_bef1, vec_aft1;
        vec_bef0 = _mm256_loadu_pd(ptr_vec + basis00);      // (i1 r1 i0 r0)
        vec_aft0 = _mm256_loadu_pd(ptr_vec + basis00 + 4);  // (i3 r3 i2 r2)
        vec_bef1 = _mm256_loadu_pd(ptr_vec + basis10);
        vec_aft1 = _mm256_loadu_pd(ptr_vec + basis10 + 4);
        __m256d vec_u0, vec_u1, vec_u2, vec_u3;
        __m256d vec_u0f, vec_u1f, vec_u2f, vec_u3f;
        __m256d vec_inv;
        vec_inv = _mm256_set_pd(1, -1, 1, -1);
        if (target_qubit_index1 == 0) {
            // low target is qubit 0: basis pairs are interleaved, regroup them
            vec_aft0 = _mm256_permute4x64_pd(vec_aft0, 78);  // (3 2 1 0) -> (1 0 3 2) : imm 78
            vec_aft1 = _mm256_permute4x64_pd(vec_aft1, 78);
            vec_u0 = _mm256_blend_pd(vec_bef0, vec_aft0, 12);  // imm 1100 = 12
            vec_u1 = _mm256_blend_pd(vec_bef0, vec_aft0, 3);   // imm 0011 = 3
            vec_u2 = _mm256_blend_pd(vec_bef1, vec_aft1, 12);
            vec_u3 = _mm256_blend_pd(vec_bef1, vec_aft1, 3);
            vec_u1 = _mm256_permute4x64_pd(vec_u1, 78);
            vec_u3 = _mm256_permute4x64_pd(vec_u3, 78);
        } else {
            // low target is qubit 1: loads already group the basis pairs
            vec_u0 = vec_bef0;
            vec_u1 = vec_aft0;
            vec_u2 = vec_bef1;
            vec_u3 = vec_aft1;
        }
        // vec_uKf = (real,imag swapped, imag negated) = i*conj pattern helper
        vec_u0f = _mm256_permute_pd(vec_u0, 5);  // imm 0101 = 5: swap within each lane
        vec_u1f = _mm256_permute_pd(vec_u1, 5);
        vec_u2f = _mm256_permute_pd(vec_u2, 5);
        vec_u3f = _mm256_permute_pd(vec_u3, 5);
        vec_u0f = _mm256_mul_pd(vec_u0f, vec_inv);
        vec_u1f = _mm256_mul_pd(vec_u1f, vec_inv);
        vec_u2f = _mm256_mul_pd(vec_u2f, vec_inv);
        vec_u3f = _mm256_mul_pd(vec_u3f, vec_inv);
        __m256d dup_mr, dup_mi;
        __m256d res_sum0, res_sum1, res_sum2, res_sum3;
        // Row 0 of the matrix (ptr_mat[0..7]).
        dup_mr = _mm256_set1_pd(ptr_mat[0]);  // (mr0 mr0 mr0 mr0)
        dup_mi = _mm256_set1_pd(ptr_mat[1]);  // (mi0 mi0 mi0 mi0)
        res_sum0 = _mm256_mul_pd(vec_u0, dup_mr);
        res_sum0 = _mm256_fmadd_pd(vec_u0f, dup_mi, res_sum0);
        dup_mr = _mm256_set1_pd(ptr_mat[2]);
        dup_mi = _mm256_set1_pd(ptr_mat[3]);
        res_sum0 = _mm256_fmadd_pd(vec_u1, dup_mr, res_sum0);
        res_sum0 = _mm256_fmadd_pd(vec_u1f, dup_mi, res_sum0);
        dup_mr = _mm256_set1_pd(ptr_mat[4]);
        dup_mi = _mm256_set1_pd(ptr_mat[5]);
        res_sum0 = _mm256_fmadd_pd(vec_u2, dup_mr, res_sum0);
        res_sum0 = _mm256_fmadd_pd(vec_u2f, dup_mi, res_sum0);
        dup_mr = _mm256_set1_pd(ptr_mat[6]);
        dup_mi = _mm256_set1_pd(ptr_mat[7]);
        res_sum0 = _mm256_fmadd_pd(vec_u3, dup_mr, res_sum0);
        res_sum0 = _mm256_fmadd_pd(vec_u3f, dup_mi, res_sum0);
        // Row 1 (ptr_mat[8..15]).
        dup_mr = _mm256_set1_pd(ptr_mat[8]);
        dup_mi = _mm256_set1_pd(ptr_mat[9]);
        res_sum1 = _mm256_mul_pd(vec_u0, dup_mr);
        res_sum1 = _mm256_fmadd_pd(vec_u0f, dup_mi, res_sum1);
        dup_mr = _mm256_set1_pd(ptr_mat[10]);
        dup_mi = _mm256_set1_pd(ptr_mat[11]);
        res_sum1 = _mm256_fmadd_pd(vec_u1, dup_mr, res_sum1);
        res_sum1 = _mm256_fmadd_pd(vec_u1f, dup_mi, res_sum1);
        dup_mr = _mm256_set1_pd(ptr_mat[12]);
        dup_mi = _mm256_set1_pd(ptr_mat[13]);
        res_sum1 = _mm256_fmadd_pd(vec_u2, dup_mr, res_sum1);
        res_sum1 = _mm256_fmadd_pd(vec_u2f, dup_mi, res_sum1);
        dup_mr = _mm256_set1_pd(ptr_mat[14]);
        dup_mi = _mm256_set1_pd(ptr_mat[15]);
        res_sum1 = _mm256_fmadd_pd(vec_u3, dup_mr, res_sum1);
        res_sum1 = _mm256_fmadd_pd(vec_u3f, dup_mi, res_sum1);
        // Row 2 (ptr_mat[16..23]).
        dup_mr = _mm256_set1_pd(ptr_mat[16]);
        dup_mi = _mm256_set1_pd(ptr_mat[17]);
        res_sum2 = _mm256_mul_pd(vec_u0, dup_mr);
        res_sum2 = _mm256_fmadd_pd(vec_u0f, dup_mi, res_sum2);
        dup_mr = _mm256_set1_pd(ptr_mat[18]);
        dup_mi = _mm256_set1_pd(ptr_mat[19]);
        res_sum2 = _mm256_fmadd_pd(vec_u1, dup_mr, res_sum2);
        res_sum2 = _mm256_fmadd_pd(vec_u1f, dup_mi, res_sum2);
        dup_mr = _mm256_set1_pd(ptr_mat[20]);
        dup_mi = _mm256_set1_pd(ptr_mat[21]);
        res_sum2 = _mm256_fmadd_pd(vec_u2, dup_mr, res_sum2);
        res_sum2 = _mm256_fmadd_pd(vec_u2f, dup_mi, res_sum2);
        dup_mr = _mm256_set1_pd(ptr_mat[22]);
        dup_mi = _mm256_set1_pd(ptr_mat[23]);
        res_sum2 = _mm256_fmadd_pd(vec_u3, dup_mr, res_sum2);
        res_sum2 = _mm256_fmadd_pd(vec_u3f, dup_mi, res_sum2);
        // Row 3 (ptr_mat[24..31]).
        dup_mr = _mm256_set1_pd(ptr_mat[24]);
        dup_mi = _mm256_set1_pd(ptr_mat[25]);
        res_sum3 = _mm256_mul_pd(vec_u0, dup_mr);
        res_sum3 = _mm256_fmadd_pd(vec_u0f, dup_mi, res_sum3);
        dup_mr = _mm256_set1_pd(ptr_mat[26]);
        dup_mi = _mm256_set1_pd(ptr_mat[27]);
        res_sum3 = _mm256_fmadd_pd(vec_u1, dup_mr, res_sum3);
        res_sum3 = _mm256_fmadd_pd(vec_u1f, dup_mi, res_sum3);
        dup_mr = _mm256_set1_pd(ptr_mat[28]);
        dup_mi = _mm256_set1_pd(ptr_mat[29]);
        res_sum3 = _mm256_fmadd_pd(vec_u2, dup_mr, res_sum3);
        res_sum3 = _mm256_fmadd_pd(vec_u2f, dup_mi, res_sum3);
        dup_mr = _mm256_set1_pd(ptr_mat[30]);
        dup_mi = _mm256_set1_pd(ptr_mat[31]);
        res_sum3 = _mm256_fmadd_pd(vec_u3, dup_mr, res_sum3);
        res_sum3 = _mm256_fmadd_pd(vec_u3f, dup_mi, res_sum3);
        // Undo the gather regrouping before storing.
        if (target_qubit_index1 == 0) {
            res_sum1 = _mm256_permute4x64_pd(res_sum1, 78);  // (3 2 1 0) -> (1 0 3 2)
            res_sum3 = _mm256_permute4x64_pd(res_sum3, 78);
            vec_bef0 = _mm256_blend_pd(res_sum0, res_sum1, 12);  // imm 1100 = 12
            vec_aft0 = _mm256_blend_pd(res_sum0, res_sum1, 3);   // imm 0011 = 3
            vec_bef1 = _mm256_blend_pd(res_sum2, res_sum3, 12);
            vec_aft1 = _mm256_blend_pd(res_sum2, res_sum3, 3);
            vec_aft0 = _mm256_permute4x64_pd(vec_aft0, 78);
            vec_aft1 = _mm256_permute4x64_pd(vec_aft1, 78);
        } else {
            vec_bef0 = res_sum0;
            vec_aft0 = res_sum1;
            vec_bef1 = res_sum2;
            vec_aft1 = res_sum3;
        }
        //// Store
        _mm256_storeu_pd(ptr_vec + basis00, vec_bef0);      // (i1 r1 i0 r0)
        _mm256_storeu_pd(ptr_vec + basis00 + 4, vec_aft0);  // (i3 r3 i2 r2)
        _mm256_storeu_pd(ptr_vec + basis10, vec_bef1);
        _mm256_storeu_pd(ptr_vec + basis10 + 4, vec_aft1);
    }
}

/* Dispatch a dense two-qubit gate to the AVX2 kernel specialized for where
 * the target qubit indices fall relative to the two lowest qubits. */
void double_qubit_dense_matrix_gate_simd(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim) {
    assert(target_qubit_index1 != target_qubit_index2);
    if (dim == 4) {
        // avx2 code cannot use for 2-qubit state
        double_qubit_dense_matrix_gate_nosimd(target_qubit_index1, target_qubit_index2, mat, vec, dim);
    }
else if (target_qubit_index1 >= 2 && target_qubit_index2 >= 2) {
        // both targets above qubit 1
        double_qubit_dense_matrix_gate_simd_high(target_qubit_index1, target_qubit_index2, mat, vec, dim);
    } else if (target_qubit_index1 >= 2 || target_qubit_index2 >= 2) {
        // exactly one target is qubit 0 or 1
        double_qubit_dense_matrix_gate_simd_middle(target_qubit_index1, target_qubit_index2, mat, vec, dim);
    } else {
        // both targets are qubit 0 and 1
        double_qubit_dense_matrix_gate_simd_low(target_qubit_index1, target_qubit_index2, mat, vec, dim);
    }
}
#endif
common.h
#pragma once #include <Eigen/Core> #include <Eigen/SparseCore> #include <glog/logging.h> #include <iostream> namespace solvers { #if USE_FLOAT using Double = float; #else using Double = double; #endif using Matrix = Eigen::Matrix<Double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; using MatrixMap = Eigen::Map<const Matrix>; using Vector = Eigen::Matrix<Double, Eigen::Dynamic, 1>; using VectorMap = Eigen::Map<const Vector>; using RowVector = Eigen::Matrix<Double, 1, Eigen::Dynamic, Eigen::RowMajor>; using RowVectorMap = Eigen::Map<const RowVector>; using IdxVector = Eigen::Matrix<int64_t, Eigen::Dynamic, 1>; using IdxVectorMap = Eigen::Map<const IdxVector>; using SpMatrix = Eigen::SparseMatrix<Double, Eigen::RowMajor>; using SpMatrixMap = Eigen::Map<const SpMatrix>; using SpVector = Eigen::SparseVector<Double>; /** subtract row mean from each row in data matrix */ inline void center(Double* const XData, const size_t rows, const size_t cols) { #pragma omp parallel for for (size_t r = 0; r < rows; ++r) { Double sum = 0; for (size_t c = 0; c < cols; ++c) { sum += XData[r*cols + c]; } sum /= cols; for (size_t c = 0; c < cols; ++c) { XData[r*cols + c] -= sum; } } } /** L2 normalize each row in data matrix */ inline void normalize(Double* const XData, const size_t rows, const size_t cols) { #pragma omp parallel for for (size_t r = 0; r < rows; ++r) { Double sum = 0; for (size_t c = 0; c < cols; ++c) { Double x = XData[r*cols + c]; sum += x * x; } sum = std::sqrt(sum); if (sum > 0) { for (size_t c = 0; c < cols; ++c) { XData[r*cols + c] /= sum; } } } } /** L2 normalize, sparse */ inline void normalize(const size_t rows, const size_t cols, const size_t nnz, const int32_t* const indptr, const int32_t* const indices, Double* const values) { #pragma omp parallel for for (size_t r = 0; r < rows; ++r) { Double sum = 0; for (int32_t idx = indptr[r]; idx < indptr[r + 1]; ++idx) { Double x = values[idx]; sum += x * x; } sum = std::sqrt(sum); if (sum > 0) { for (int32_t idx = 
indptr[r]; idx < indptr[r + 1]; ++idx) { values[idx] /= sum; } } } } }
DRB003-antidep2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* A two-level loop nest with loop carried anti-dependence on the outer level.
   Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18 */
#include <stdio.h>
#include <stdlib.h>

/* DataRaceBench kernel ("orig-yes" = the race is intentional benchmark
   content; tools are expected to report it, not fix it). */
int main(int argc,char *argv[])
{
  int i, j;
  int len = 20;
  double a[20][20];

  /* parallel initialization; i and j are privatized, so this nest is race-free */
#pragma omp parallel for private(i, j)
  for (i=0; i< len; i++)
#pragma omp parallel for private(j)
    for (j=0; j<len; j++)
      a[i][j] = (i * len + j + 0.5);

  /* The outer i-loop carries an anti-dependence: iteration i reads a[i+1][j]
     while iteration i+1 writes it.
     NOTE(review): the file header documents the race pair a[i][j]/a[i+1][j],
     which arises when the OUTER loop is parallelized; as written here the
     pragma sits on the inner j-loop — confirm against upstream DataRaceBench. */
  for (i = 0; i < len - 1; i += 1) {
#pragma omp parallel for
    for (j = 0; j < len ; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }

  /* print the whole matrix so the compiler cannot elide the computation */
  for (i=0; i< len; i++)
    for (j=0; j<len; j++)
      printf("%lf",a[i][j]);
  printf ("a[10][10]=%f\n", a[10][10]);
  return 0;
}
dotp.c
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REPEAT 100

int array_size = 10000000;

/* Allocate an array of n doubles filled with drand48() samples.
   Caller owns the buffer.  Fix: allocation failure now returns NULL
   instead of dereferencing a null pointer in the fill loop. */
double *gen_array(int n) {
  double *array = malloc(sizeof(*array) * n);
  if (array == NULL)
    return NULL;
  for (int i = 0; i < n; i++)
    array[i] = drand48();
  return array;
}

/* Baseline dot product: every accumulation goes through a critical
   section.  Deliberately slow -- kept as the comparison point. */
double dotp_naive(double *x, double *y, int arr_size) {
  double global_sum = 0.0;
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < arr_size; i++)
#pragma omp critical
      global_sum += x[i] * y[i];
  }
  return global_sum;
}

// EDIT THIS FUNCTION PART 1
/* Manual reduction: one partial-sum slot per thread, combined serially.
   Fixes vs. original:
     - `nt` was assigned by *every* thread of a throwaway parallel region
       with no synchronization -- a data race (UB).  The scratch array is
       now sized with omp_get_max_threads() and the actual team size is
       published race-free by a single thread inside the working region.
     - calloc result is checked before use. */
double dotp_manual_optimized(double *x, double *y, int arr_size) {
  int max_threads = omp_get_max_threads();
  double *sums = calloc(max_threads, sizeof(double));
  if (sums == NULL)
    return 0.0;
  int nt = 1; /* actual team size, written by exactly one thread below */
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    int team = omp_get_num_threads(); /* <= max_threads with default settings */
#pragma omp single
    nt = team; /* single writer; implicit barrier publishes it */
    double each_sum = 0.0;
    for (int i = tid; i < arr_size; i += team)
      each_sum += x[i] * y[i];
    sums[tid] = each_sum; /* distinct slot per thread -- no race */
  }
  double sum = 0;
  for (int i = 0; i < nt; i++)
    sum += sums[i];
  free(sums);
  return sum;
}

// EDIT THIS FUNCTION PART 2
/* Reduction clause: the runtime keeps a private accumulator per thread
   and combines them at the end of the loop. */
double dotp_reduction_optimized(double *x, double *y, int arr_size) {
  double global_sum = 0.0;
#pragma omp parallel for reduction(+ : global_sum)
  for (int i = 0; i < arr_size; i++)
    global_sum += x[i] * y[i];
  return global_sum;
}

/* Benchmark driver: times the serial, manual, reduction, and naive
   variants and verifies each parallel result against the serial one. */
int main() {
  // Generate input vectors
  double *x = gen_array(array_size), *y = gen_array(array_size);
  double start_time, run_time;
  double serial_result, result;
  int ret = 0;

  /* Fix: bail out instead of dereferencing failed allocations. */
  if (x == NULL || y == NULL) {
    fprintf(stderr, "allocation failure\n");
    ret = -1;
    goto exit;
  }

  // calculate result serially
  start_time = omp_get_wtime();
  for (int j = 0; j < REPEAT; j++) {
    serial_result = 0.0;
    for (int i = 0; i < array_size; i++)
      serial_result += x[i] * y[i];
  }
  run_time = omp_get_wtime() - start_time;
  printf("Naive solution took %f seconds\n", run_time);

  int num_threads = omp_get_max_threads();
  for (int i = 1; i <= num_threads; i++) {
    omp_set_num_threads(i);
    start_time = omp_get_wtime();
    for (int j = 0; j < REPEAT; j++)
      result = dotp_manual_optimized(x, y, array_size);
    run_time = omp_get_wtime() - start_time;
    printf("Manual Optimized: %d thread(s) took %f seconds\n", i, run_time);

    // verify result is correct (within some threshold)
    if (fabs(serial_result - result) > 0.001) {
      printf("%g %g %g", serial_result, result, serial_result - result);
      printf("Incorrect result!\n");
      ret = -1;
      goto exit;
    }
  }

  for (int i = 1; i <= num_threads; i++) {
    omp_set_num_threads(i);
    start_time = omp_get_wtime();
    for (int j = 0; j < REPEAT; j++)
      result = dotp_reduction_optimized(x, y, array_size);
    run_time = omp_get_wtime() - start_time;
    printf("Reduction Optimized: %d thread(s) took %f seconds\n", i, run_time);

    // verify result is correct (within some threshold)
    if (fabs(serial_result - result) > 0.001) {
      printf("Incorrect result!\n");
      ret = -1;
      goto exit;
    }
  }

  // Only run this once because it's too slow..
  omp_set_num_threads(1);
  start_time = omp_get_wtime();
  for (int j = 0; j < REPEAT; j++)
    result = dotp_naive(x, y, array_size);
  run_time = omp_get_wtime() - start_time;
  printf("Naive: %d thread(s) took %f seconds\n", 1, run_time);

exit:
  free(x);
  free(y);
  return ret;
}
image-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickCore Image View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/MagickCore.h" #include "MagickCore/exception-private.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/thread-private.h" /* Typedef declarations. */ struct _ImageView { char *description; RectangleInfo extent; Image *image; CacheView *view; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageView() makes a copy of the specified image view. 
%
% The format of the CloneImageView method is:
%
%      ImageView *CloneImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireCriticalMemory(sizeof(*clone_view));
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    Fix: the clone previously left ->image NULL (from the memset), so
    GetImageViewImage() on a cloned view returned NULL.  The image is
    shared, not copied, matching NewImageView() semantics.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageView() deallocates memory associated with a image view.
%
%  The format of the DestroyImageView method is:
%
%      ImageView *DestroyImageView(ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
% */ MagickExport ImageView *DestroyImageView(ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); if (image_view->description != (char *) NULL) image_view->description=DestroyString(image_view->description); image_view->view=DestroyCacheView(image_view->view); image_view->exception=DestroyExceptionInfo(image_view->exception); image_view->signature=(~MagickCoreSignature); image_view=(ImageView *) RelinquishMagickMemory(image_view); return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferImageViewIterator() iterates over three image views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel extent is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination image view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % The callback signature is: % % MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source, % const ImageView *duplex,ImageView *destination,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferImageViewIterator method is: % % MagickBooleanType DuplexTransferImageViewIterator(ImageView *source, % ImageView *duplex,ImageView *destination, % DuplexTransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. 
% % o duplex: the duplex image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType DuplexTransferImageViewIterator( ImageView *source,ImageView *duplex,ImageView *destination, DuplexTransferImageViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (DuplexTransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; const Quantum *magick_restrict duplex_pixels, *magick_restrict pixels; Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { 
status=MagickFalse; continue; } if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(source_image,source->description,progress, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticMetacontent() returns the image view authentic % meta-content. % % The format of the GetImageViewAuthenticPixels method is: % % void *GetImageViewAuthenticMetacontent( % const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport void *GetImageViewAuthenticMetacontent( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticMetacontent(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticPixels() returns the image view authentic pixels. % % The format of the GetImageViewAuthenticPixels method is: % % Quantum *GetImageViewAuthenticPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
% */ MagickExport Quantum *GetImageViewAuthenticPixels( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticPixelQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewException() returns the severity, reason, and description of any % error that occurs when utilizing a image view. % % The format of the GetImageViewException method is: % % char *GetImageViewException(const PixelImage *image_view, % ExceptionType *severity) % % A description of each parameter follows: % % o image_view: the pixel image_view. % % o severity: the severity of the error is returned here. % */ MagickExport char *GetImageViewException(const ImageView *image_view, ExceptionType *severity) { char *description; assert(image_view != (const ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); assert(severity != (ExceptionType *) NULL); *severity=image_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent, sizeof(*description)); if (description == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *description='\0'; if (image_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->reason), MagickPathExtent); if (image_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MagickPathExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->description), MagickPathExtent); (void) ConcatenateMagickString(description,")",MagickPathExtent); } return(description); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewExtent() returns the image view extent. % % The format of the GetImageViewExtent method is: % % RectangleInfo GetImageViewExtent(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(image_view->extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewImage() returns the image associated with the image view. % % The format of the GetImageViewImage method is: % % MagickCore *GetImageViewImage(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport Image *GetImageViewImage(const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(image_view->image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewIterator() iterates over the image view in parallel and calls % your get method for each scanline of the view. The pixel extent is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. 
% % The callback signature is: % % MagickBooleanType GetImageViewMethod(const ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetImageViewIterator method is: % % MagickBooleanType GetImageViewIterator(ImageView *source, % GetImageViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o get: the get callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType GetImageViewIterator(ImageView *source, GetImageViewMethod get,void *context) { Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (get == (GetImageViewMethod) NULL) return(MagickFalse); source_image=source->image; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); const Quantum *pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } if (get(source,y,id,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(source_image,source->description,progress, 
source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewVirtualMetacontent() returns the image view virtual % meta-content. % % The format of the GetImageViewVirtualMetacontent method is: % % const void *GetImageViewVirtualMetacontent( % const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport const void *GetImageViewVirtualMetacontent( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewVirtualMetacontent(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewVirtualPixels() returns the image view virtual pixels. % % The format of the GetImageViewVirtualPixels method is: % % const Quantum *GetImageViewVirtualPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport const Quantum *GetImageViewVirtualPixels( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewVirtualPixelQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageView() returns MagickTrue if the parameter is verified as a image % view object. 
% % The format of the IsImageView method is: % % MagickBooleanType IsImageView(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport MagickBooleanType IsImageView(const ImageView *image_view) { if (image_view == (const ImageView *) NULL) return(MagickFalse); if (image_view->signature != MagickCoreSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageView() returns a image view required for all other methods in the % Image View API. % % The format of the NewImageView method is: % % ImageView *NewImageView(MagickCore *wand,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view)); (void) memset(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->image=image; image_view->view=AcquireVirtualCacheView(image_view->image,exception); image_view->extent.width=image->columns; image_view->extent.height=image->rows; image_view->extent.x=0; image_view->extent.y=0; image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickCoreSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageViewRegion() returns a image view required 
for all other methods
% in the Image View API.
%
% The format of the NewImageViewRegion method is:
%
%      ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of a extent of
%      pixel_wands view.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height,
  ExceptionInfo *exception)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view));
  (void) memset(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  /*
    Fix: ->image must be assigned *before* acquiring the cache view.
    The original called AcquireVirtualCacheView(image_view->image,...)
    while image_view->image was still NULL from the memset, so the view
    was never attached to the caller's image (NewImageView() already
    uses the correct order).
  */
  image_view->image=image;
  image_view->view=AcquireVirtualCacheView(image_view->image,exception);
  image_view->extent.width=width;
  image_view->extent.height=height;
  image_view->extent.x=x;
  image_view->extent.y=y;
  image_view->exception=AcquireExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i e w D e s c r i p t i o n                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewDescription() associates a description with an image view.
%
%  The format of the SetImageViewDescription method is:
%
%      void SetImageViewDescription(ImageView *image_view,
%        const char *description)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
%    o description: the image view description.
% */ MagickExport void SetImageViewDescription(ImageView *image_view, const char *description) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->description=ConstantString(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewIterator() iterates over the image view in parallel and calls % your set method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. The pixels are initiallly % undefined and any settings you make in the callback method are automagically % synced back to your image. % % The callback signature is: % % MagickBooleanType SetImageViewMethod(ImageView *destination, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback set method that must be % executed by a single thread at a time. % % The format of the SetImageViewIterator method is: % % MagickBooleanType SetImageViewIterator(ImageView *destination, % SetImageViewMethod set,void *context) % % A description of each parameter follows: % % o destination: the image view. % % o set: the set callback method. % % o context: the user defined context. 
% */ MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination, SetImageViewMethod set,void *context) { Image *destination_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(destination != (ImageView *) NULL); assert(destination->signature == MagickCoreSignature); if (set == (SetImageViewMethod) NULL) return(MagickFalse); destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=destination->extent.height-destination->extent.y; #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(destination_image,destination_image,height,1) #endif for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; Quantum *magick_restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x, y,destination->extent.width,1,destination->exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } if (set(destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(destination_image,destination->description, progress,destination->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f e r I m a g e V i e w I t e r a t o r % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransferImageViewIterator() iterates over two image views in parallel and % calls your transfer method for each scanline of the view. The source pixel % extent is not confined to the image canvas-- that is you can include % negative offsets or widths or heights that exceed the image dimension. % However, the destination image view is confined to the image canvas-- that % is no negative offsets or widths or heights that exceed the image dimension % are permitted. % % The callback signature is: % % MagickBooleanType TransferImageViewMethod(const ImageView *source, % ImageView *destination,const ssize_t y,const int thread_id, % void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the TransferImageViewIterator method is: % % MagickBooleanType TransferImageViewIterator(ImageView *source, % ImageView *destination,TransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
% */ MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source, ImageView *destination,TransferImageViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (TransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; const Quantum *magick_restrict pixels; Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { status=MagickFalse; continue; } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; 
proceed=SetImageProgress(source_image,source->description,progress, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateImageViewIterator() iterates over the image view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateImageViewIterator method is: % % MagickBooleanType UpdateImageViewIterator(ImageView *source, % UpdateImageViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o update: the update callback method. % % o context: the user defined context. 
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Fix: the original assigned the sync result directly to `status`
      ("status=SyncCacheViewAuthenticPixels(...); if (status==MagickFalse)
      status=MagickFalse;"), which could overwrite a MagickFalse recorded
      by the update callback or a prior iteration with MagickTrue.  Use a
      local and only ever demote status, matching the other iterators in
      this file.
    */
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
GB_binop__isne_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isne_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__isne_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint8) // A*D function (colscale): GB (_AxD__isne_uint8) // D*A function (rowscale): GB (_DxB__isne_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__isne_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__isne_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint8) // C=scalar+B GB (_bind1st__isne_uint8) // C=scalar+B' GB (_bind1st_tran__isne_uint8) // C=A+scalar GB (_bind2nd__isne_uint8) // C=A'+scalar GB (_bind2nd_tran__isne_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = 
Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_UINT8 || GxB_NO_ISNE_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // shared template computes C += A+B with all three matrices dense
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // dispatch to the shared dense ewise3 template; the operator itself is
    // injected via the GB_BINOP macro defined earlier in this file
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // B has already been sliced into B_ntasks tasks for B_nthreads threads
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isne_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the code
    // generator; kept byte-identical because this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isne_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isne_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, 
const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isne_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__isne_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__isne_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__identity_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint64_uint64
// op(A') function:  GB_tran__identity_uint64_uint64

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint64_t

// type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity, so z is just x
#define GB_OP(z, x) \
    z = x ;

// casting (uint64_t to uint64_t: a no-op for this kernel)
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;\
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;\
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_uint64_uint64
(
    uint64_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise copy of anz entries, statically scheduled across threads
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop lives in the shared template; GB_PHASE_2_OF_2
    // selects the numerical (second) phase
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
BenchUtils.h
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include <chrono> #include <functional> #include <vector> #include <immintrin.h> #ifdef USE_BLAS #if __APPLE__ // not sure whether need to differentiate TARGET_OS_MAC or TARGET_OS_IPHONE, // etc. #include <Accelerate/Accelerate.h> #else #include <cblas.h> #endif #endif #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MKL #include <mkl.h> #endif #include "./AlignedVec.h" #include "fbgemm/FbgemmBuild.h" #include "fbgemm/FbgemmPackMatrixB.h" #include "src/RefImplementations.h" namespace fbgemm { template <typename T> void randFill(aligned_vector<T>& vec, T low, T high); void llc_flush(std::vector<char>& llc); // Same as omp_get_max_threads() when OpenMP is available, otherwise 1 int fbgemm_get_max_threads(); // Same as omp_get_num_threads() when OpenMP is available, otherwise 1 int fbgemm_get_num_threads(); // Same as omp_get_thread_num() when OpenMP is available, otherwise 0 int fbgemm_get_thread_num(); template <typename T> NOINLINE float cache_evict(const T& vec) { auto const size = vec.size(); auto const elemSize = sizeof(typename T::value_type); auto const dataSize = size * elemSize; const char* data = reinterpret_cast<const char*>(vec.data()); constexpr int CACHE_LINE_SIZE = 64; // Not having this dummy computation significantly slows down the computation // that follows. 
  float dummy = 0.0f;
  // Touch one byte per cache line, fence, then flush that line.  The dummy
  // accumulation forces the reads to actually happen (see comment above).
  for (std::size_t i = 0; i < dataSize; i += CACHE_LINE_SIZE) {
    dummy += data[i] * 1.0f;
    _mm_mfence();
#ifndef _MSC_VER
    // compiler barrier: keep the load/flush ordering the source expresses
    asm volatile("" ::: "memory");
#endif
    _mm_clflush(&data[i]);
  }

  return dummy;
}

/**
 * Parse application command line arguments
 *
 */
int parseArgumentInt(
    int argc,
    const char* argv[],
    const char* arg,
    int non_exist_val,
    int def_val);

bool parseArgumentBool(
    int argc,
    const char* argv[],
    const char* arg,
    bool def_val);

namespace {

// Default no-op eviction functor for measureWithWarmup.
struct empty_flush {
  void operator()() const {}
};

} // namespace

/**
 * Time fn over measuredIterations after warmupIterations untimed runs.
 * Returns mean seconds per measured iteration (thread 0's timing only).
 *
 * @param Fn functor to execute
 * @param Fe data eviction functor
 */
template <class Fn, class Fe = std::function<void()>>
double measureWithWarmup(
    Fn&& fn,
    int warmupIterations,
    int measuredIterations,
    const Fe& fe = empty_flush(),
    bool useOpenMP = false) {
  for (int i = 0; i < warmupIterations; ++i) {
    // Evict data first
    fe();
    fn();
  }

  double ttot = 0.0;

#ifdef _OPENMP
#pragma omp parallel if (useOpenMP)
  {
#endif
    for (int i = 0; i < measuredIterations; ++i) {
      int thread_id = 0;
      std::chrono::time_point<std::chrono::high_resolution_clock> start, end;

#ifdef _OPENMP
      if (useOpenMP) {
        thread_id = omp_get_thread_num();
      }
#endif

      // Only one thread evicts; the barriers below keep the others from
      // starting (or stopping) the timed region before eviction completes.
      if (thread_id == 0) {
        fe();
      }

#ifdef _OPENMP
      if (useOpenMP) {
#pragma omp barrier
      }
#endif
      start = std::chrono::high_resolution_clock::now();

      fn();

#ifdef _OPENMP
      if (useOpenMP) {
#pragma omp barrier
      }
#endif
      end = std::chrono::high_resolution_clock::now();
      auto dur =
          std::chrono::duration_cast<std::chrono::nanoseconds>(end - start);

      if (thread_id == 0) {
        // TODO: measure load imbalance
        ttot += dur.count();
      }
    }
#ifdef _OPENMP
  }
#endif

  // nanoseconds -> seconds, averaged over measured iterations
  return ttot / 1e9 / measuredIterations;
}

/*
 * @brief Out-of-place transposition for M*N matrix ref.
* @param M number of rows in input * @param K number of columns in input */ template <typename T> void transpose_matrix( int M, int N, const T* src, int ld_src, T* dst, int ld_dst) { for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { dst[i * ld_dst + j] = src[i + j * ld_src]; } } // for each output row } /* * @brief In-place transposition for nxk matrix ref. * @param n number of rows in input (number of columns in output) * @param k number of columns in input (number of rows in output) */ template <typename T> void transpose_matrix(T* ref, int n, int k) { std::vector<T> local(n * k); transpose_matrix(n, k, ref, k, local.data(), n); memcpy(ref, local.data(), n * k * sizeof(T)); } #if defined(USE_MKL) void test_xerbla(char* srname, const int* info, int); #endif #define dataset 1 template <typename btype> void performance_test( int num_instances, bool flush, int repetitions, bool is_mkl) { #if defined(USE_MKL) mkl_set_xerbla((XerblaEntry)test_xerbla); #endif float alpha = 1.f, beta = 1.f; matrix_op_t btran = matrix_op_t::Transpose; #if dataset == 1 const int NITER = (flush) ? 10 : 100; std::vector<std::vector<int>> shapes; for (auto m = 1; m < 120; m++) { // shapes.push_back({m, 128, 512}); shapes.push_back({m, 512, 512}); } #elif dataset == 2 const int NITER = (flush) ? 
10 : 100; #include "shapes_dataset.h" #else flush = false; constexpr int NITER = 1; std::vector<std::vector<int>> shapes; std::random_device r; std::default_random_engine generator(r()); std::uniform_int_distribution<int> dm(1, 100); std::uniform_int_distribution<int> dnk(1, 1024); for (int i = 0; i < 1000; i++) { int m = dm(generator); int n = dnk(generator); int k = dnk(generator); shapes.push_back({m, n, k}); } #endif std::string type; double gflops, gbs, ttot; for (auto s : shapes) { int m = s[0]; int n = s[1]; int k = s[2]; // initialize with small numbers aligned_vector<int> Aint(m * k); randFill(Aint, 0, 4); std::vector<aligned_vector<float>> A; for (int i = 0; i < num_instances; ++i) { A.push_back(aligned_vector<float>(Aint.begin(), Aint.end())); } aligned_vector<int> Bint(k * n); randFill(Bint, 0, 4); aligned_vector<float> B(Bint.begin(), Bint.end()); std::vector<std::unique_ptr<PackedGemmMatrixB<btype>>> Bp; for (int i = 0; i < num_instances; ++i) { Bp.emplace_back(std::unique_ptr<PackedGemmMatrixB<btype>>( new PackedGemmMatrixB<btype>(btran, k, n, alpha, B.data()))); } auto kAligned = ((k * sizeof(float) + 64) & ~63) / sizeof(float); auto nAligned = ((n * sizeof(float) + 64) & ~63) / sizeof(float); std::vector<aligned_vector<float>> Bt(num_instances); auto& Bt_ref = Bt[0]; if (btran == matrix_op_t::Transpose) { Bt_ref.resize(k * nAligned); for (auto row = 0; row < k; ++row) { for (auto col = 0; col < n; ++col) { Bt_ref[row * nAligned + col] = alpha * B[col * k + row]; } } } else { Bt_ref.resize(kAligned * n); for (auto row = 0; row < k; ++row) { for (auto col = 0; col < n; ++col) { Bt_ref[col * kAligned + row] = alpha * B[col * k + row]; } } } for (auto i = 1; i < num_instances; ++i) { Bt[i] = Bt_ref; } std::vector<aligned_vector<float>> C_ref; std::vector<aligned_vector<float>> C_fb; if (beta != 0.0f) { aligned_vector<int> Cint(m * n); randFill(Cint, 0, 4); for (int i = 0; i < num_instances; ++i) { C_ref.push_back(aligned_vector<float>(Cint.begin(), 
Cint.end())); C_fb.push_back(aligned_vector<float>(Cint.begin(), Cint.end())); } } else { for (int i = 0; i < num_instances; ++i) { C_ref.push_back(aligned_vector<float>(m * n, 1.f)); C_fb.push_back(aligned_vector<float>(m * n, NAN)); } } double nflops = 2.0 * m * n * k; double nbytes = 4.0 * m * k + sizeof(btype) * 1.0 * k * n + 4.0 * m * n; // warm up MKL and fbgemm // check correctness at the same time for (auto w = 0; w < 3; w++) { #if defined(USE_MKL) || defined(USE_BLAS) cblas_sgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans, // B is pretransposed, if required by operation m, n, k, 1.0, // Mutliplication by Alpha is done during transpose of B A[0].data(), k, Bt[0].data(), btran == matrix_op_t::NoTranspose ? kAligned : nAligned, beta, C_ref[0].data(), n); #else cblas_sgemm_ref( matrix_op_t::NoTranspose, matrix_op_t::NoTranspose, m, n, k, 1.0, A[0].data(), k, Bt[0].data(), (btran == matrix_op_t::NoTranspose) ? kAligned : nAligned, beta, C_ref[0].data(), n); #endif #ifdef _OPENMP #pragma omp parallel if (num_instances == 1) #endif { int num_threads = num_instances == 1 ? fbgemm_get_num_threads() : 1; int tid = num_instances == 1 ? fbgemm_get_thread_num() : 0; cblas_gemm_compute( matrix_op_t::NoTranspose, m, A[0].data(), *Bp[0], beta, C_fb[0].data(), tid, num_threads); } #if defined(USE_MKL) || defined(USE_BLAS) // Compare results for (auto i = 0; i < C_ref[0].size(); i++) { if (std::abs(C_ref[0][i] - C_fb[0][i]) > 1e-3) { fprintf( stderr, "Error: too high diff between fp32 ref %f and fp16 %f at %d\n", C_ref[0][i], C_fb[0][i], i); return; } } #endif } #if defined(USE_MKL) if (is_mkl) { // Gold via MKL sgemm type = "MKL_FP32"; #elif defined(USE_BLAS) type = "BLAS_FP32"; #else type = "REF_FP32"; #endif ttot = measureWithWarmup( [&]() { int copy = num_instances == 1 ? 
0 : fbgemm_get_thread_num(); for (int i = 0; i < repetitions; ++i) { #if defined(USE_MKL) || defined(USE_BLAS) cblas_sgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0, A[copy].data(), k, Bt[copy].data(), btran == matrix_op_t::NoTranspose ? kAligned : nAligned, beta, C_ref[copy].data(), n); #else cblas_sgemm_ref( matrix_op_t::NoTranspose, matrix_op_t::NoTranspose, m, n, k, 1.0, A[copy].data(), k, Bt[copy].data(), (btran == matrix_op_t::NoTranspose) ? kAligned : nAligned, beta, C_ref[copy].data(), n); #endif } }, 3, NITER, [&]() { if (flush) { int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num(); cache_evict(A[copy]); cache_evict(Bt[copy]); cache_evict(C_ref[copy]); } }, // Use OpenMP if num instances > 1 num_instances > 1); gflops = nflops / ttot / 1e9; gbs = nbytes / ttot / 1e9; printf( "\n%30s m = %5d n = %5d k = %5d Gflops = %8.4lf GBytes = %8.4lf\n", type.c_str(), m, n, k, gflops * repetitions, gbs * repetitions); #ifdef USE_MKL } #endif type = "FBP_" + std::string(typeid(btype).name()); ttot = measureWithWarmup( [&]() { // When executing in data decomposition (single-instance) mode // Different threads will access different regions of the same // matrices. Thus, copy to be used is always 0. The numbers of // threads would be the as number of threads in the parallel // region. // When running in functional decomposition (multi-instance) mode // different matrices are used. The copy to be used selected by // thread_id (thread_num), and the number of threads performance // the compute of the same instance is 1. int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num(); int num_threads = num_instances == 1 ? fbgemm_get_num_threads() : 1; int tid = num_instances == 1 ? fbgemm_get_thread_num() : 0; for (int i = 0; i < repetitions; ++i) { cblas_gemm_compute( matrix_op_t::NoTranspose, m, A[copy].data(), *Bp[copy], beta, C_fb[copy].data(), tid, num_threads); } }, 3, NITER, [&]() { if (flush) { int copy = num_instances == 1 ? 
0 : fbgemm_get_thread_num(); cache_evict(A[copy]); cache_evict(*Bp[copy]); cache_evict(C_fb[copy]); } }, true /*useOpenMP*/); gflops = nflops / ttot / 1e9; gbs = nbytes / ttot / 1e9; printf( "%30s m = %5d n = %5d k = %5d Gflops = %8.4lf GBytes = %8.4lf\n", type.c_str(), m, n, k, gflops * repetitions, gbs * repetitions); } } aligned_vector<float> getRandomSparseVector( unsigned size, float fractionNonZeros = 1.0); template <typename T> aligned_vector<T> getRandomBlockSparseMatrix( int Rows, int Cols, float fractionNonZerosBlocks = 1.0, int RowBlockSize = 4, int ColBlockSize = 1, T low = 0, T high = 9); } // namespace fbgemm
DRB052-indirectaccesssharebase-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This example is to mimic a memory access pattern extracted from an LLNL proxy app. 
   Two pointers have distance of 12.
   They are used as base addresses of two arrays, indexed through an index set.
   The index set has no two indices with distance of 12.
   So there is no loop carried dependence.
*/

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Index set extracted from the proxy app.  No two entries differ by exactly
   12, which is what guarantees xa1[idx] and xa2[idx] (= base[idx] and
   base[idx+12]) never touch the same element across iterations. */
int indexSet[180] = {521, 523, 525, 527, 529, 531, 547, 549, 551, 553, 555,
557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627,
629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867,
869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 921, 937, 939,
941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997,
999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301,
1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537,
1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591,
1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645,
1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881,
1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003,
2005, 2007, 2009, 2011, 2013};

int main(int argc, char * argv[])
{
/* buffer covers indices 0..2025; only 521..2025 are ever used */
double * base = (double * )malloc(sizeof (double)*((2013+12)+1));
double * xa1 = base;
double * xa2 = base+12;  /* second view, offset by 12 elements */
int i;
int _ret_val_0;
if (base==0)
{
printf("Error, malloc() returns NULL. End execution. \n");
_ret_val_0=1;
return _ret_val_0;
}
/* zero the used range; parallelized by Cetus (no dependences here) */
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for
for (i=521; i<=2025; ++ i)
{
base[i]=0.0;
}
/* this level of loop has no loop carried dependence */
#pragma loop name main#1
for (i=0; i<180; ++ i)
{
int idx = indexSet[i];
xa1[idx]+=1.0;
xa2[idx]+=3.0;
}
/* verify the results, no overlapping of xa1 vs. xa2, no addition happens to
   the same element twice: an element equal to 4.0 (= 1.0 + 3.0) would mean
   some idx and idx+12 were both in the index set.  The block below is the
   Cetus-expanded form of assert(base[i] != 4.0). */
#pragma loop name main#2
for (i=521; i<=2025; ++ i)
{
/* printf ("%f ", base[i]); */
(((void)sizeof ((base[i]!=4.0) ? 1 : 0)), ({ if (base[i]!=4.0) { ; } else { __assert_fail("base[i]!=4.0", "DRB052-indirectaccesssharebase-orig-no.c", 126, __PRETTY_FUNCTION__); } }));
}
free(base);
_ret_val_0=0;
return _ret_val_0;
}
GB_unaryop__minv_uint8_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_uint8_uint16
// op(A') function: GB_tran__minv_uint8_uint16

// C type:   uint8_t
// A type:   uint16_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 8)

// type of the A matrix entries
#define GB_ATYPE \
    uint16_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// access the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator: 8-bit unsigned integer "multiplicative inverse"
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 8) ;

// casting from the A type to the C type
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to all anz entries of Ax, writing into Cx,
// parallelized over nthreads.  Returns GrB_NO_VALUE when the operator/type
// combination is compiled out via GB_DISABLE.
GrB_Info GB_unop__minv_uint8_uint16
(
    uint8_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// which is specialized here through the macros defined above.
GrB_Info GB_tran__minv_uint8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
comm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * Copyright (c) 2015 by Contributors */ #ifndef MXNET_KVSTORE_COMM_H_ #define MXNET_KVSTORE_COMM_H_ #include <dmlc/omp.h> #include <string> #include <algorithm> #include <utility> #include <limits> #include <vector> #include <tuple> #include <thread> #include "mxnet/ndarray.h" #include "gradient_compression.h" #include "../ndarray/ndarray_function.h" #include "../operator/tensor/sparse_retain-inl.h" #include "./kvstore_utils.h" namespace mxnet { namespace kvstore { /** * \brief multiple device commmunication */ class Comm { public: Comm() { pinned_ctx_ = Context::CPUPinned(0); } virtual ~Comm() { } /** * \brief init key with the data shape and storage shape */ virtual void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) = 0; /** * \brief returns src[0] + .. 
+ src[src.size()-1] */ virtual const NDArray& Reduce( int key, const std::vector<NDArray>& src, int priority) = 0; /** * \brief copy from src to dst[i] for every i */ virtual void Broadcast( int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) = 0; /** * \brief broadcast src to dst[i] with target row_ids for every i * \param key the identifier key for the stored ndarray * \param src the source row_sparse ndarray to broadcast * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast, where the row_ids are expected to be unique and sorted in row_id.data() * \param priority the priority of the operation */ virtual void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) = 0; /** * \brief return a pinned contex */ Context pinned_ctx() const { return pinned_ctx_; } /** * \brief Sets gradient compression parameters to be able to * perform reduce with compressed gradients */ void SetGradientCompression(std::shared_ptr<GradientCompression> gc) { gc_ = gc; } protected: Context pinned_ctx_; std::shared_ptr<GradientCompression> gc_; }; /** * \brief an implemention of Comm that first copy data to CPU memeory, and then * reduce there */ class CommCPU : public Comm { public: CommCPU() { nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4); bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000); // TODO(junwu) delete the following data member, now for benchmark only is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0); } virtual ~CommCPU() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int type = mshadow::kFloat32) override { // Delayed allocation - the dense merged buffer might not be used at all if push() // only sees sparse arrays bool delay_alloc = true; merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type); } const NDArray& Reduce(int key, const 
std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; const auto stype = src[0].storage_type(); // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (stype == kDefaultStorage) { return src[0]; } else { // With 'local' kvstore, we could store the weight on CPU while compute // the gradient on GPU when the weight is extremely large. // To avoiding copying the weight to the same context of the gradient, // we always copy the gradient to merged buf. NDArray& merged = buf.merged_buf(stype); CopyFromTo(src[0], &merged, priority); return merged; } } NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate copy buffer buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } else { // sparse reduce std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = buf_merged; is_serial_push_? ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } return buf_merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // First copy data to pinned_ctx, then broadcast. // Note that kv.init initializes the data on pinned_ctx. 
// This branch indicates push() with ndarrays on gpus were called, // and the source is copied to gpu ctx. // Also indicates that buffers are already initialized during push(). auto& buf = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf, priority); for (auto d : dst) CopyFromTo(buf, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (size_t i = 0; i < dst.size(); ++i) { NDArray* out = dst[i].first; NDArray row_id = dst[i].second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. 
To avoid such an issue," "consider create a new NDArray buffer to store the output."); } Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = retained_cpu; // get rid the of const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); // if retained_cpu == out, CopyFromTo will ignore the copy operation CopyFromTo(retained_cpu, out, priority); } } private: // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray. 
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], 
in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray 
merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, 
src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. 
for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size()-1); for (size_t i = 0; i < src.size()-1; ++i) { buf.copy_buf[i] = NDArray( buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); } } for (size_t i = 0; i < src.size()-1; ++i) { CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority); reduce[i+1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = 
buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) { // copy to a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (size_t i = 0; i < dst.size(); ++i) { NDArray* out = dst[i].first; NDArray row_id = dst[i].second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. 
To avoid such an issue," "consider create a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_CUDA case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } using KeyAttrs = std::tuple<int, TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), []( const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) { const int key = std::get<0>(sorted_key_attrs_[i]); const TShape& shape = std::get<1>(sorted_key_attrs_[i]); const int type = std::get<2>(sorted_key_attrs_[i]); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) { size_t size = it->second.second; if (size <= min_size) { ctx = it->second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if 
push() // only sees sparse arrays if (buf.merged.is_none()) { bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_CUDA std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n*n); // Restores active device to what it was before EnableP2P mxnet::common::cuda::DeviceStore device_store; for (int i = 0; i < n; ++i) { device_store.SetDevice(gpus[i]); for (int j = 0; j < n; j++) { int access; cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0); if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i*n+j] = 1; } } } } if (enabled != n*(n-1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n*(n-1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i*n+j] ? 
'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
sillyGPU.c
/* Tempo sequencial
real 0m9,743s user 0m9,674s sys 0m0,030s
real 0m9,695s user 0m9,673s sys 0m0,004s
real 0m9,692s user 0m9,669s sys 0m0,011s
real 0m9,649s user 0m9,631s sys 0m0,004s
real 0m9,694s user 0m9,682s sys 0m0,000s

Tempo paralelo - multicore
real 0m2,720s user 0m15,143s sys 0m0,056s
real 0m2,822s user 0m15,280s sys 0m0,040s
real 0m2,624s user 0m15,217s sys 0m0,044s
real 0m2,800s user 0m15,143s sys 0m0,056s
real 0m2,680s user 0m15,143s sys 0m0,056s

Tempo paralelo - GPU
real 0m2,360s user 0m14,952s sys 0m0,033s
real 0m2,465s user 0m14,981s sys 0m0,037s
real 0m2,300s user 0m15,118s sys 0m0,046s
real 0m2,279s user 0m15,029s sys 0m0,030s
real 0m2,381s user 0m15,603s sys 0m0,091s
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Rank ("silly") sort of n integers with OpenMP offload:
 *   1. fill in[] with n..1 on the device,
 *   2. compute each element's rank (count of smaller elements) on the host,
 *   3. scatter elements to their ranked position on the device,
 *   4. verify out[] is the ascending sequence 1..n.
 * Exit status: 0 on success, nonzero on allocation failure or a failed check.
 */
int main() {
    int i, j, n = 100000;

    /* Allocate input, rank and output arrays (zero-initialized). */
    int *in  = (int*) calloc(n, sizeof(int));
    int *pos = (int*) calloc(n, sizeof(int));
    int *out = (int*) calloc(n, sizeof(int));
    if (in == NULL || pos == NULL || out == NULL) {
        /* BUG FIX: allocation results were previously unchecked. */
        fprintf(stderr, "allocation failed\n");
        exit(EXIT_FAILURE);
    }

    /* Initialize on the device; map(from:) copies the produced data back. */
    #pragma omp target map(from:in[0:n])
    #pragma omp teams distribute parallel for simd
    for (i = 0; i < n; i++)
        in[i] = n - i;

    /* Rank computation on the host.
     * BUG FIX: the original used collapse(2), which flattens the (i,j) space
     * and lets one row's iterations be split across threads, racing on
     * pos[i]++.  Parallelizing only the outer loop gives each thread whole
     * rows, so every pos[i] has a single writer. */
    #pragma omp parallel for schedule(guided)
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            if (in[i] > in[j])
                pos[i]++;

    /* Scatter to sorted order on the device.
     * BUG FIX: the map directions were inverted — in/pos are inputs and must
     * be copied TO the device, out is the result and must be copied FROM the
     * device.  The original map(from:in,pos) map(to:out) left out[] unchanged
     * on the host and read uninitialized device copies of in/pos. */
    #pragma omp target map(to:in[0:n],pos[0:n]) map(from:out[0:n])
    #pragma omp teams distribute parallel for simd
    for (i = 0; i < n; i++)
        out[pos[i]] = in[i];

    /* Verify the ascending sequence 1..n. */
    #pragma omp parallel for schedule(guided)
    for (i = 0; i < n; i++)
        if (i + 1 != out[i]) {
            printf("test failed\n");
            /* BUG FIX: was exit(0); a failed test must report nonzero. */
            exit(EXIT_FAILURE);
        }

    printf("test passed\n");
    free(in);
    free(pos);
    free(out);
    return 0;
}
repeat_base.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2013, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: David Weese <david.weese@fu-berlin.de> // ========================================================================== #ifndef SEQAN_HEADER_REPEAT_BASE_H #define SEQAN_HEADER_REPEAT_BASE_H #if SEQAN_ENABLE_PARALLELISM #include <seqan/parallel.h> #endif // #if SEQAN_ENABLE_PARALLELISM namespace seqan { /** .Class.Repeat ..summary:Store information about a repeat. ..cat:Index ..signature:Repeat<TPos, TPeriod> ..param.TPos:Type to use for storing positions. ...metafunction:Metafunction.Value ..param.TPeriod:Type to use for storing the repeat period. ...default:1 ...metafunction:Metafunction.Size ..include:seqan/index.h ..see:Function.findRepeats .Memvar.Repeat#beginPosition ..summary:The begin position of the repeat of type $TPos$. ..class:Class.Repeat .Memvar.Repeat#endPosition ..summary:The end position of the repeat of type $TPos$. ..class:Class.Repeat .Memvar.Repeat#period ..summary:The period of the repeat of type $TSize$. ..class:Class.Repeat */ /*! * @class Repeat * * @headerfile seqan/index.h * * @brief Store information about a repeat. * * @signature Repeat<TPos, TPeriod> * * @tparam TPeriod Type to use for storing the repeat period. Default: 1 * @tparam TPos Type to use for storing positions. * * @see findRepeats * * @var VariableType Repeat::endPosition * * @brief The end position of the repeat of type <tt>TPos</tt>. * * @var VariableType Repeat::beginPosition * * @brief The begin position of the repeat of type <tt>TPos</tt>. * * @var VariableType Repeat::period * * @brief The period of the repeat of type <tt>TSize</tt>. 
*/ template <typename TPos, typename TPeriod> struct Repeat { TPos beginPosition; TPos endPosition; TPeriod period; }; template <typename TPos, typename TPeriod> struct Value< Repeat<TPos, TPeriod> > { typedef TPos Type; }; template <typename TPos, typename TPeriod> struct Size< Repeat<TPos, TPeriod> > { typedef TPeriod Type; }; template <typename TSize> struct RepeatFinderParams { TSize minRepeatLen; TSize maxPeriod; }; // custom TSpec for our customized wotd-Index struct TRepeatFinder; template <typename TText> struct Cargo<Index<TText, IndexWotd<TRepeatFinder> > > { typedef Index<TText, IndexWotd<TRepeatFinder> > TIndex; typedef typename Size<TIndex>::Type TSize; typedef RepeatFinderParams<TSize> Type; }; // node predicate template <typename TText, typename TSpec> bool nodePredicate(Iter<Index<TText, IndexWotd<TRepeatFinder> >, TSpec> &it) { // return countOccurrences(it) * nodeDepth(it) >= cargo(container(it)).minRepeatLen; return countOccurrences(it) * repLength(it) >= cargo(container(it)).minRepeatLen; } // monotonic hull template <typename TText, typename TSpec> bool nodeHullPredicate(Iter<Index<TText, IndexWotd<TRepeatFinder> >, TSpec> &it) { // return nodeDepth(it) <= cargo(container(it)).maxPeriod; return repLength(it) <= cargo(container(it)).maxPeriod; } template <typename TPos> struct RepeatLess_ : public ::std::binary_function<TPos, TPos, bool> { // key less inline bool operator() (TPos const &a, TPos const &b) { return posLess(a, b); } }; template <typename TValue> inline bool _repeatMaskValue(TValue const &) { // TODO(holtgrew): Maybe use unknownValue<TValue>() instead of specializing for all alphabets, especially since we have Rna5 now and might want Rna5Q later. 
return false; } template <> inline bool _repeatMaskValue(Dna5 const &val) { return val == unknownValue<Dna5>(); // 'N' } template <> inline bool _repeatMaskValue(Dna5Q const &val) { return val == unknownValue<Dna5Q>(); // 'N' } template <> inline bool _repeatMaskValue(Iupac const &val) { return val == unknownValue<Iupac>(); // 'N' } /* template <> inline bool _repeatMaskValue(AminoAcid val) { return val == 'X'; } */ /** .Function.findRepeats ..summary:Search for repeats in a text. ..cat:Index ..signature:findRepeats(repeatString, text, minRepeatLength[, maxPeriod]) ..param.repeatString:A @Class.String@ of @Class.Repeat@ objects. ..param.text:The text to search repeats in. ...type:Class.String ...type:Class.StringSet ..param.minRepeatLength:The minimum length each reported repeat must have. ..param.maxPeriod:Optionally, the maximal period that reported repeats can have. ...default:1 ..remarks:Subsequences of undefined values/$N$s will always be reported. ..example.text:The following demonstrates finding repeats of period 1. ..example.code: String<Repeat<unsigned, unsigned> > repeats; Dna5String text = "CGATAAAACTNN"; // repeat 0 AAAA // repeat 1 NN findRepeats(repeats, text, 3); // ==> length(repeats) == 2 // ==> repeats[0] == {beginPosition: 4, endPosition: 8, period: 1} // ==> repeats[1] == {beginPosition: 11, endPosition: 13, period: 1} ..see:Function.unknownValue ..include:seqan/index.h ..see:Class.Repeat */ /*! * @fn findRepeats * * @headerfile seqan/index.h * * @brief Search for repeats in a text. * * @signature findRepeats(repeatString, text, minRepeatLength[, maxPeriod]) * * @param text The text to search repeats in. Types: @link SequenceConcept @endlink * @param repeatString A @link String @endlink of @link Repeat @endlink objects. * @param maxPeriod Optionally, the maximal period that reported repeats can * have. Default: 1 * @param minRepeatLength The minimum length each reported repeat must have. 
* * @section Remarks * * Subsequences of undefined values/<tt>N</tt>s will always be reported. * * @section Examples * * The following demonstrates finding repeats of period 1. * * @code{.cpp} * String<Repeat<unsigned, unsigned> > repeats; * Dna5String text = "CGATAAAACTNN"; * // repeat 0 AAAA * // repeat 1 NN * * findRepeats(repeats, text, 3); * // ==> length(repeats) == 2 * // ==> repeats[0] == {beginPosition: 4, endPosition: 8, period: 1} * // ==> repeats[1] == {beginPosition: 11, endPosition: 13, period: 1} * @endcode * @see unknownValue * @see Repeat */ // TODO(holtgrew): minRepeatLength is 1-off. // period-1 optimization template <typename TRepeatStore, typename TString, typename TRepeatSize> inline void findRepeats(TRepeatStore &repString, TString const &text, TRepeatSize minRepeatLen) { typedef typename Value<TRepeatStore>::Type TRepeat; typedef typename Iterator<TString const>::Type TIterator; typedef typename Size<TString>::Type TSize; #if SEQAN_ENABLE_PARALLELISM typedef typename Value<TString>::Type TValue; if (length(text) > (TSize)(omp_get_max_threads() * 2 * minRepeatLen)) { // std::cerr << ">>> PARALLEL WABOOGIE!" << std::endl; // std::cerr << "omp_get_max_threads() == " << omp_get_max_threads() << std::endl; // Parallel case. // NOTE(holtgrew): The minimum text length check above makes it impossible that more than two chunks are // required to form an otherwise too short repeat. // TODO(holtgrew): Load balancing? Probably not worth it. String<TSize> splitters; String<TRepeatStore> threadLocalStores; // Each threads finds repeats on its chunk in parallel. #pragma omp parallel { // We have to determine the number of available threads at this point. We will use the number of thread // local stores to determin the number of available threads later on. 
#pragma omp master { // std::cerr << "omp_get_num_threads() == " << omp_get_num_threads() << std::endl; computeSplitters(splitters, length(text), omp_get_num_threads()); resize(threadLocalStores, omp_get_num_threads()); } // end of #pragma omp master #pragma omp barrier int const t = omp_get_thread_num(); TRepeatStore & store = threadLocalStores[t]; TRepeat rep; rep.beginPosition = 0; rep.endPosition = 0; rep.period = 1; // Flags used for force-adding repeats for the chunks that have a left/right neighbour. bool forceFirst = t > 0; bool forceLast = (t + 1) < omp_get_num_threads(); // #pragma omp critical // std::cerr << "omp_get_num_threads() == " << omp_get_num_threads() << std::endl; TIterator it = iter(text, splitters[t], Standard()); TIterator itEnd = iter(text, splitters[t + 1], Standard()); if (it != itEnd) { TValue last = *it; TSize repLeft = 0; TSize repRight = 1; for (++it; it != itEnd; ++it, ++repRight) { if (*it != last) { // #pragma omp critical // std::cerr << "t == " << t << ", last == " << last << ", repRight = " << repRight << ", repLeft == " << repLeft << ", minRepeatLen = " << minRepeatLen << ", forceFirst = " << forceFirst << std::endl; if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen || forceFirst) { forceFirst = false; // insert repeat rep.beginPosition = splitters[t] + repLeft; rep.endPosition = splitters[t] + repRight; // #pragma omp critical // std::cerr << " t == " << t << ", append" << std::endl; appendValue(store, rep); } repLeft = repRight; last = *it; } } // #pragma omp critical // std::cerr << "t == " << t << ", last == " << last << ", repRight = " << repRight << ", repLeft == " << repLeft << ", minRepeatLen = " << minRepeatLen << ", forceLast = " << forceLast << std::endl; if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen || forceLast) { // Insert repeat but only if it is not already in there. 
if (empty(store) || (back(store).beginPosition != repLeft && back(store).endPosition != repRight)) { rep.beginPosition = splitters[t] + repLeft; rep.endPosition = splitters[t] + repRight; // #pragma omp critical // std::cerr << " t == " << t << ", append" << std::endl; appendValue(store, rep); } } } } // end of #pragma omp parallel // std::cerr << ",-- REPEATS BEFORE MENDING\n"; // for (unsigned i = 0; i < length(threadLocalStores); ++i) // { // std::cerr << "| i = " << i << std::endl; // for (unsigned j = 0; j < length(threadLocalStores[i]); ++j) // std::cerr << "| threadLocalStores[" << i << "][" << j << "] == {" << threadLocalStores[i][j].beginPosition << ", " << threadLocalStores[i][j].endPosition << "}" << std::endl; // } // std::cerr << "`--" << std::endl; // Mend the splice points. // // We will copy out infixes described by fromPositions. String<Pair<TSize> > fromPositions; resize(fromPositions, length(threadLocalStores)); for (unsigned i = 0; i < length(fromPositions); ++i) { fromPositions[i].i1 = 0; fromPositions[i].i2 = length(threadLocalStores[i]); } // First, merge repeats spanning blocks. Do this iteratively until all has been merged. bool anyChange; do { anyChange = false; int lastNonEmpty = -1; for (unsigned i = 0; i < length(threadLocalStores); ++i) { if (fromPositions[i].i1 == fromPositions[i].i2) continue; // Skip empty buckets. if (lastNonEmpty != -1) { bool const adjacent = back(threadLocalStores[lastNonEmpty]).endPosition == front(threadLocalStores[i]).beginPosition; bool const charsEqual = text[back(threadLocalStores[lastNonEmpty]).beginPosition] == text[front(threadLocalStores[i]).beginPosition]; if (adjacent && charsEqual) { anyChange = true; back(threadLocalStores[lastNonEmpty]).endPosition = front(threadLocalStores[i]).endPosition; fromPositions[i].i1 += 1; } } if (fromPositions[i].i1 != fromPositions[i].i2) lastNonEmpty = i; } } while (anyChange); // Then, remove any repeats in the beginning and end of blocks that are too short. 
for (unsigned i = 0; i < length(threadLocalStores); ++i) { if (fromPositions[i].i1 == fromPositions[i].i2) continue; unsigned j = fromPositions[i].i1; TRepeatSize len = threadLocalStores[i][j].endPosition - threadLocalStores[i][j].beginPosition; if (!_repeatMaskValue(text[threadLocalStores[i][j].beginPosition]) && // Never remove mask value. len <= minRepeatLen) fromPositions[i].i1 += 1; if (fromPositions[i].i1 == fromPositions[i].i2) continue; j = fromPositions[i].i2 - 1; len = threadLocalStores[i][j].endPosition - threadLocalStores[i][j].beginPosition; if (!_repeatMaskValue(text[threadLocalStores[i][j].beginPosition]) && // Never remove mask value. len <= minRepeatLen) fromPositions[i].i2 -= 1; } // Last, build splitters for output in parallel. String<unsigned> outSplitters; appendValue(outSplitters, 0); for (unsigned i = 0; i < length(threadLocalStores); ++i) appendValue(outSplitters, back(outSplitters) + fromPositions[i].i2 - fromPositions[i].i1); // std::cerr << ",-- REPEATS AFTER MENDING\n"; // for (unsigned i = 0; i < length(threadLocalStores); ++i) // { // std::cerr << "| i = " << i << std::endl; // std::cerr << "`--, fromPositions[" << i << "] = (" << fromPositions[i].i1 << ", " << fromPositions[i].i2 << std::endl; // for (unsigned j = 0; j < length(threadLocalStores[i]); ++j) // std::cerr << " | threadLocalStores[" << i << "][" << j << "] == {" << threadLocalStores[i][j].beginPosition << ", " << threadLocalStores[i][j].endPosition << "}" << std::endl; // } // std::cerr << " `--" << std::endl; // Allocate memory. clear(repString); resize(repString, back(outSplitters)); // Copy back the repeats in parallel. unsigned nt = length(threadLocalStores); (void) nt; // Otherwise, GCC 4.6 warns, does not see it used in pragma clause below. 
#pragma omp parallel num_threads(nt) { int const t = omp_get_thread_num(); arrayCopy(iter(threadLocalStores[t], fromPositions[t].i1, Standard()), iter(threadLocalStores[t], fromPositions[t].i2, Standard()), iter(repString, outSplitters[t], Standard())); } // end of #pragma omp parallel } else { #endif // #if SEQAN_ENABLE_PARALLELISM // Sequential case. TRepeat rep; rep.period = 1; clear(repString); TIterator it = begin(text, Standard()); TIterator itEnd = end(text, Standard()); if (it == itEnd) return; TSize repLen = 1; for (++it; it != itEnd; ++it) { if (*it != *(it-1)) { if (_repeatMaskValue(*(it-1)) || repLen > (TSize)minRepeatLen) { // insert repeat rep.endPosition = it - begin(text, Standard()); rep.beginPosition = rep.endPosition - repLen; // ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl; appendValue(repString, rep); } repLen = 1; } else ++repLen; } if (_repeatMaskValue(*(it-1)) || repLen > (TSize)minRepeatLen) { // insert repeat rep.endPosition = length(text); rep.beginPosition = rep.endPosition - repLen; // ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl; appendValue(repString, rep); } #if SEQAN_ENABLE_PARALLELISM } #endif // #if SEQAN_ENABLE_PARALLELISM // #pragma omp critical // { // std::cerr << "thread #" << omp_get_thread_num() << " REPEATS:"; // for (unsigned i = 0; i < length(repString); ++i) { // std::cerr << " (" << repString[i].beginPosition << ", " << repString[i].endPosition << ", " << repString[i].period << ")"; // } // std::cerr << std::endl; // } } // TODO(holtgrew): Why for TString const and StringSet<> const? 
template <typename TRepeatStore, typename TString, typename TSpec, typename TRepeatSize> inline void findRepeats(TRepeatStore &repString, StringSet<TString, TSpec> const &text, TRepeatSize minRepeatLen) { typedef typename Value<TRepeatStore>::Type TRepeat; typedef typename Iterator<TString>::Type TIterator; typedef typename Value<TString>::Type TValue; typedef typename Size<TString>::Type TSize; TRepeat rep; rep.period = 1; clear(repString); for (unsigned i = 0; i < length(text); ++i) { TIterator it = begin(text[i], Standard()); TIterator itEnd = end(text[i], Standard()); if (it == itEnd) continue; TValue last = *it; TSize repLeft = 0; TSize repRight = 1; rep.beginPosition.i1 = i; rep.endPosition.i1 = i; for (++it; it != itEnd; ++it, ++repRight) { if (last != *it) { if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen) { // insert repeat rep.beginPosition.i2 = repLeft; rep.endPosition.i2 = repRight; // ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl; appendValue(repString, rep); } repLeft = repRight; last = *it; } } if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen) { // insert repeat rep.beginPosition.i2 = repLeft; rep.endPosition.i2 = repRight; // ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl; appendValue(repString, rep); } } } // main function template <typename TRepeatStore, typename TText, typename TRepeatSize, typename TPeriodSize> void findRepeats(TRepeatStore &repString, TText const &text, TRepeatSize minRepeatLen, TPeriodSize maxPeriod) { typedef Index<TText, IndexWotd<TRepeatFinder> > TIndex; typedef typename Size<TIndex>::Type TSize; typedef typename Iterator<TIndex, TopDown<ParentLinks<> > >::Type TNodeIterator; typedef typename Fibre<TIndex, FibreSA>::Type const TSA; typedef 
typename Infix<TSA>::Type TOccString; typedef typename Iterator<TOccString>::Type TOccIterator; typedef typename Value<TRepeatStore>::Type TRepeat; typedef typename Value<TOccString>::Type TOcc; typedef ::std::map<TOcc,TRepeat,RepeatLess_<TOcc> > TRepeatList; if (maxPeriod < 1) return; if (maxPeriod == 1) { findRepeats(repString, text, minRepeatLen); return; } TIndex index(text); TRepeatList list; // set repeat finder parameters cargo(index).minRepeatLen = minRepeatLen; cargo(index).maxPeriod = maxPeriod; TNodeIterator nodeIt(index); TOccIterator itA, itB, itRepBegin, itEnd; TRepeat rep; for (; !atEnd(nodeIt); goNext(nodeIt)) { if (isRoot(nodeIt)) continue; // get occurrences TOccString occ = getOccurrences(nodeIt); itA = begin(occ, Standard()); itEnd = end(occ, Standard()); itRepBegin = itB = itA; TSize repLen = repLength(nodeIt); // representative length if ((TSize)minRepeatLen <= repLen) continue; TSize diff, period = 0; // period of current repeat TSize repeatLen = 0; // overall length of current repeat TSize minLen = minRepeatLen - repLen; // minimum repeat length minus length of representative for (++itB; itB != itEnd; ++itB) { diff = posSub(*itB, *itA); if (diff != period || getSeqNo(*itA) != getSeqNo(*itB)) { // is the repeat long enough? if (repeatLen >= minLen) // is the repeat self overlapping or connected? if (parentRepLength(nodeIt) < period && period <= repLen) { // insert repeat rep.beginPosition = *itRepBegin; rep.endPosition = posAdd(*itA, period); rep.period = period; // ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl; list.insert(::std::pair<TOcc,TRepeat>(rep.beginPosition, rep)); } itRepBegin = itA; period = diff; repeatLen = 0; } repeatLen += period; itA = itB; } // is the last repeat long enough? if (repeatLen >= minLen) // is the repeat self overlapping or connected? 
if (parentRepLength(nodeIt) < period && period <= repLen) { // insert repeat rep.beginPosition = *itRepBegin; rep.endPosition = posAdd(*itA, period); rep.period = period; // ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl; list.insert(::std::pair<TOcc,TRepeat>(rep.beginPosition, rep)); } } // copy low-complex regions to result string clear(repString); reserve(repString, list.size(), Exact()); typename TRepeatList::const_iterator lit = list.begin(); typename TRepeatList::const_iterator litEnd = list.end(); for (TSize i = 0; lit != litEnd; ++lit, ++i) appendValue(repString, (*lit).second); } } // namespace seqan #endif
GB_unaryop__lnot_int64_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int64_uint64 // op(A') function: GB_tran__lnot_int64_uint64 // C type: int64_t // A type: uint64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int64_uint64 ( int64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { 
GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB029-truedep1-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* DataRaceBench kernel DRB029 ("truedep1-orig-yes"): this program
   INTENTIONALLY contains a data race -- it is a "-yes" benchmark that
   race-detection tools are expected to flag.  Do not "fix" the race.
   The race comes from a loop-carried true (flow) dependence: iteration
   i writes a[i+1] while iteration i+1 reads a[i].
   Data race pair: a[i+1]@64:5 vs. a[i]@64:12
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
  /* Instrumentation hooks for the race-detection harness. */
  omprace_init();

  int i;
  int len=100;
  int a[100];

  /* Serial initialization: a[i] = i. */
  for (i=0;i<len;i++)
    a[i]=i;

  /* RACE (intentional): a[i+1] is written using a[i] from the previous
     iteration, so the iterations are NOT independent and this
     parallelization is incorrect by design. */
  #pragma omp parallel for
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]+1;

  /* With the race the printed value is nondeterministic. */
  printf("a[50]=%d\n", a[50]);

  omprace_fini();
  return 0;
}
cancel-for-1.c
/* { dg-do run } */ /* { dg-set-target-env-var OMP_CANCELLATION "true" } */ #include <stdlib.h> #include <omp.h> int main () { #pragma omp parallel num_threads (32) { int i; #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (omp_get_cancellation ()) abort (); } } return 0; }
fill_ints.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <complex.h> #include <assert.h> #include "config.h" #include "cint.h" #include "vhf/fblas.h" #include "pbc/optimizer.h" #define INTBUFMAX 1000 #define INTBUFMAX10 8000 #define IMGBLK 80 #define OF_CMPLX 2 #define MIN(X,Y) ((X)<(Y)?(X):(Y)) #define MAX(X,Y) ((X)>(Y)?(X):(Y)) int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); static int shloc_partition(int *kshloc, int *ao_loc, int ksh0, int ksh1, int dkmax) { int ksh; int nloc = 0; int loclast = ao_loc[ksh0]; kshloc[0] = ksh0; for (ksh = ksh0+1; ksh < ksh1; ksh++) { assert(ao_loc[ksh+1] - ao_loc[ksh] < dkmax); if (ao_loc[ksh+1] - loclast > dkmax) { nloc += 1; kshloc[nloc] = ksh; loclast = ao_loc[ksh]; } } nloc += 1; kshloc[nloc] = ksh1; return nloc; } static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL) { env_loc[ptr+0] = env[ptr+0] + Ls[iL*3+0]; env_loc[ptr+1] = env[ptr+1] + Ls[iL*3+1]; env_loc[ptr+2] = env[ptr+2] + Ls[iL*3+2]; } static void sort3c_kks1(double complex *out, double *bufr, double *bufi, int *kptij_idx, int *shls_slice, int *ao_loc, int nkpts, int nkpts_ij, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; 
const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; out += (ip * naoj + jp) * naok; int i, j, k, kk, ik, jk, ksh, ic, dk, dijk; size_t off; double *pbr, *pbi; double complex *pout; for (kk = 0; kk < nkpts_ij; kk++) { ik = kptij_idx[kk] / nkpts; jk = kptij_idx[kk] % nkpts; off = (ik*nkpts+jk) * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbr = bufr + off + dijk*ic; pbi = bufi + off + dijk*ic; for (j = 0; j < dj; j++) { for (k = 0; k < dk; k++) { for (i = 0; i < di; i++) { pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I; } } pout += naok; pbr += di; pbi += di; } } off += dijk * comp; } out += nijk * comp; } } static void _nr3c_fill_kk(int (*intor)(), void (*fsort)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, PBCOpt *pbcopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const char TRANS_N = 'N'; const double D0 = 0; const double D1 = 1; const double ND1 = -1; jsh += jsh0; ish += ish0; int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS]; int 
jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        /* NOTE(review): this is the tail of a fill routine whose head lies
         * above this chunk (apparently the kk-pair driver called by
         * PBCnr3c_fill_kks1/kks2 below).  Shell dimensions of the fixed
         * (ish, jsh) pair; k-shells are blocked so one block fits INTBUFMAX. */
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        int dkmax = INTBUFMAX / dij;
        int kshloc[ksh1-ksh0+1];
        int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
        int i, m, msh0, msh1, dijm, dijmc, dijmk, empty;
        int ksh, dk, iL0, iL, jL, iLcount;
        int shls[3];
        double *bufkk_r, *bufkk_i, *bufkL_r, *bufkL_i, *bufL, *pbuf, *cache;
        int (*fprescreen)();
        /* Optional shell-pair prescreening; PBCnoscreen accepts everything. */
        if (pbcopt != NULL) {
                fprescreen = pbcopt->fprescreen;
        } else {
                fprescreen = PBCnoscreen;
        }

        shls[0] = ish;
        shls[1] = jsh;
        for (m = 0; m < nkshloc; m++) {
                msh0 = kshloc[m];
                msh1 = kshloc[m+1];
                dkmax = ao_loc[msh1] - ao_loc[msh0];
                dijm = dij * dkmax;
                dijmc = dijm * comp;
                dijmk = dijmc * nkpts;
                /* Carve the caller-provided workspace `buf` into:
                 * bufkk_{r,i}: k,k-resolved accumulators (nkpts x dijmk each)
                 * bufkL_{r,i}: per-image-block intermediates
                 * bufL:        raw integrals for all jL images
                 * cache:       scratch for the integral engine */
                bufkk_r = buf;
                bufkk_i = bufkk_r + (size_t)nkpts * dijmk;
                bufkL_r = bufkk_i + (size_t)nkpts * dijmk;
                bufkL_i = bufkL_r + (size_t)MIN(nimgs,IMGBLK) * dijmk;
                bufL = bufkL_i + (size_t)MIN(nimgs,IMGBLK) * dijmk;
                cache = bufL + (size_t)nimgs * dijmc;
                for (i = 0; i < nkpts*dijmk*OF_CMPLX; i++) {
                        bufkk_r[i] = 0;
                }

                /* Lattice images of shell i are processed in blocks of IMGBLK. */
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                shift_bas(env_loc, env, Ls, iptrxyz, iL);
                                pbuf = bufL;
                                for (jL = 0; jL < nimgs; jL++) {
                                        shift_bas(env_loc, env, Ls, jptrxyz, jL);
                                        if ((*fprescreen)(shls, pbcopt, atm, bas, env_loc)) {
                                                for (ksh = msh0; ksh < msh1; ksh++) {
                                                        shls[2] = ksh;
                                                        if ((*intor)(pbuf, NULL, shls, atm, natm,
                                                                     bas, nbas, env_loc,
                                                                     cintopt, cache)) {
                                                                empty = 0;
                                                        }
                                                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                                                        pbuf += dij*dk * comp;
                                                }
                                        } else {
                                                /* Screened out: zero-fill so the dgemm
                                                 * contraction below stays valid. */
                                                for (i = 0; i < dijmc; i++) {
                                                        pbuf[i] = 0;
                                                }
                                                pbuf += dijmc;
                                        }
                                }
                                /* Contract over jL images with the phase factors
                                 * (real and imaginary parts separately). */
                                dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &nimgs,
                                       &D1, bufL, &dijmc, expkL_r, &nimgs,
                                       &D0, bufkL_r+(iL-iL0)*(size_t)dijmk, &dijmc);
                                dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &nimgs,
                                       &D1, bufL, &dijmc, expkL_i, &nimgs,
                                       &D0, bufkL_i+(iL-iL0)*(size_t)dijmk, &dijmc);
                        } // iL in range(0, nimgs)
                        // conj(exp(1j*dot(h,k)))
                        /* Accumulate the iL contraction; the sign pattern below
                         * implements multiplication by the conjugated phase. */
                        dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                               &D1, bufkL_r, &dijmk, expkL_r+iL0, &nimgs,
                               &D1, bufkk_r, &dijmk);
                        dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                               &D1, bufkL_i, &dijmk, expkL_i+iL0, &nimgs,
                               &D1, bufkk_r, &dijmk);
                        dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                               &D1, bufkL_i, &dijmk, expkL_r+iL0, &nimgs,
                               &D1, bufkk_i, &dijmk);
                        dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                               &ND1, bufkL_r, &dijmk, expkL_i+iL0, &nimgs,
                               &D1, bufkk_i, &dijmk);
                }
                /* Scatter this k-shell block into the output layout. */
                (*fsort)(out, bufkk_r, bufkk_i, kptij_idx, shls_slice, ao_loc,
                         nkpts, nkpts_ij, comp, ish, jsh, msh0, msh1);
        }
}

/* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL) */
/* s1 (no permutation symmetry) entry point for the kk-pair fill. */
void PBCnr3c_fill_kks1(int (*intor)(), double complex *out, int nkpts_ij,
                       int nkpts, int comp, int nimgs, int ish, int jsh,
                       double *buf, double *env_loc, double *Ls,
                       double *expkL_r, double *expkL_i, int *kptij_idx,
                       int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt, PBCOpt *pbcopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        _nr3c_fill_kk(intor, &sort3c_kks1, out,
                      nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                      buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                      shls_slice, ao_loc, cintopt, pbcopt,
                      atm, natm, bas, nbas, env);
}

/* Sort routine for the ish > jsh case with s2 symmetry: writes the (i,j)
 * block and, simultaneously, the conjugated (j,i) block of the output. */
static void sort3c_kks2_igtj(double complex *out, double *bufr, double *bufi,
                             int *kptij_idx, int *shls_slice, int *ao_loc,
                             int nkpts, int nkpts_ij, int comp,
                             int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        /* s2 symmetry requires square (i,j) AO ranges. */
        assert(naoi == naoj);
        const size_t njk = naoj * naok;
        const size_t nijk = njk * naoi;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dij = di * dj;
        const int dkmax = ao_loc[msh1] - ao_loc[msh0];
        const size_t dijmc = dij * dkmax * comp;
        double complex *outij = out + (ip * naoj + jp) * naok;
        double complex *outji = out + (jp * naoj + ip) * naok;
        int i, j, k, kk, ik, jk, ksh, ic, dk, dijk;
        size_t offij, offji;
        double *pbij_r, *pbij_i, *pbji_r, *pbji_i;
        double complex *poutij, *poutji;

        for (kk = 0; kk < nkpts_ij; kk++) {
                /* kptij_idx packs the (ik, jk) k-point pair into one int. */
                ik = kptij_idx[kk] / nkpts;
                jk = kptij_idx[kk] % nkpts;
                offij = (ik*nkpts+jk) * dijmc;
                offji = (jk*nkpts+ik) * dijmc;
                for (ksh = msh0; ksh < msh1; ksh++) {
                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                        dijk = dij * dk;
                        for (ic = 0; ic < comp; ic++) {
                                poutij = outij + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                                poutji = outji + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                                pbij_r = bufr + offij + dijk*ic;
                                pbij_i = bufi + offij + dijk*ic;
                                pbji_r = bufr + offji + dijk*ic;
                                pbji_i = bufi + offji + dijk*ic;
                                for (j = 0; j < dj; j++) {
                                        for (k = 0; k < dk; k++) {
                                        for (i = 0; i < di; i++) {
                                                /* (i,j) block as-is; (j,i) block takes
                                                 * the complex conjugate. */
                                                poutij[i*njk +k] = pbij_r[k*dij+i] + pbij_i[k*dij+i]*_Complex_I;
                                                poutji[i*naok+k] = pbji_r[k*dij+i] - pbji_i[k*dij+i]*_Complex_I;
                                        } }
                                        poutij += naok;
                                        poutji += njk;
                                        pbij_r += di;
                                        pbij_i += di;
                                        pbji_r += di;
                                        pbji_i += di;
                                }
                        }
                        offij += dijk * comp;
                        offji += dijk * comp;
                }
                outij += nijk * comp;
                outji += nijk * comp;
        }
}

/* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL) */
/* s2 (i>=j permutation symmetry) entry point for the kk-pair fill.
 * Dispatches on the global shell indices: i > j uses the paired sort,
 * i == j the plain one; i < j blocks are covered by symmetry and skipped. */
void PBCnr3c_fill_kks2(int (*intor)(), double complex *out, int nkpts_ij,
                       int nkpts, int comp, int nimgs, int ish, int jsh,
                       double *buf, double *env_loc, double *Ls,
                       double *expkL_r, double *expkL_i, int *kptij_idx,
                       int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt, PBCOpt *pbcopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int ip = ish + shls_slice[0];
        /* NOTE(review): jsh is offset by -nbas, presumably because the j
         * shells live in a concatenated (cell + supmol) basis — confirm
         * against the Python caller. */
        int jp = jsh + shls_slice[2] - nbas;
        if (ip > jp) {
                _nr3c_fill_kk(intor, &sort3c_kks2_igtj, out,
                              nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                              buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                              shls_slice, ao_loc, cintopt, pbcopt,
                              atm, natm, bas, nbas, env);
        } else if (ip == jp) {
                _nr3c_fill_kk(intor, &sort3c_kks1, out,
                              nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                              buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                              shls_slice, ao_loc, cintopt, pbcopt,
                              atm, natm, bas, nbas, env);
        }
}

/* Scatter one (ish,jsh,k-shell-block) of real/imag buffers into the complex
 * s1 output, one k-point at a time. */
static void sort3c_ks1(double complex *out, double *bufr, double *bufi,
                       int *shls_slice, int *ao_loc, int nkpts, int comp,
                       int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t njk = naoj * naok;
        const size_t nijk = njk * naoi;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dij = di * dj;
        const int dkmax = ao_loc[msh1] - ao_loc[msh0];
        const size_t dijmc = dij * dkmax * comp;
        out += (ip * naoj + jp) * naok;
        int i, j, k, kk, ksh, ic, dk, dijk;
        size_t off;
        double *pbr, *pbi;
        double complex *pout;

        for (kk = 0; kk < nkpts; kk++) {
                off = kk * dijmc;
                for (ksh = msh0; ksh < msh1; ksh++) {
                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                        dijk = dij * dk;
                        for (ic = 0; ic < comp; ic++) {
                                pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                                pbr = bufr + off + dijk*ic;
                                pbi = bufi + off + dijk*ic;
                                for (j = 0; j < dj; j++) {
                                        for (k = 0; k < dk; k++) {
                                        for (i = 0; i < di; i++) {
                                                pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I;
                                        } }
                                        pout += naok;
                                        pbr += di;
                                        pbi += di;
                                }
                        }
                        off += dijk * comp;
                }
                out += nijk * comp;
        }
}

/* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */
/* Single-k-point-index fill: both phase factors carry the SAME k index, so
 * the two image sums collapse to one contraction per iL with the combined
 * phase conj(expkL[k,iL])*expkL[k,jL] built on the fly in bufexp_{r,i}. */
static void _nr3c_fill_k(int (*intor)(), void (*fsort)(),
                         double complex *out, int nkpts_ij,
                         int nkpts, int comp, int nimgs, int ish, int jsh,
                         double *buf, double *env_loc, double *Ls,
                         double *expkL_r, double *expkL_i, int *kptij_idx,
                         int *shls_slice, int *ao_loc,
                         CINTOpt *cintopt, PBCOpt *pbcopt,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const char TRANS_N = 'N';
        const double D1 = 1;

        jsh += jsh0;
        ish += ish0;
        int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        int dkmax = INTBUFMAX10 / dij;
        int kshloc[ksh1-ksh0+1];
        int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
        int i, m, msh0, msh1, dijmc, empty;
        size_t dijmk;
        int ksh, dk, iL, jL, jLcount;
        int shls[3];
        /* Workspace layout: per-k combined phases first, then the
         * k-resolved accumulators, raw integrals and engine scratch. */
        double *bufexp_r = buf;
        double *bufexp_i = bufexp_r + nimgs * nkpts;
        double *bufk_r = bufexp_i + nimgs * nkpts;
        double *bufk_i, *bufL, *pbuf, *cache;
        int (*fprescreen)();
        if (pbcopt != NULL) {
                fprescreen = pbcopt->fprescreen;
        } else {
                fprescreen = PBCnoscreen;
        }

        shls[0] = ish;
        shls[1] = jsh;
        for (m = 0; m < nkshloc; m++) {
                msh0 = kshloc[m];
                msh1 = kshloc[m+1];
                dkmax = ao_loc[msh1] - ao_loc[msh0];
                dijmc = dij * dkmax * comp;
                dijmk = dijmc * nkpts;
                bufk_i = bufk_r + dijmk;
                bufL = bufk_i + dijmk;
                cache = bufL + nimgs * dijmc;
                for (i = 0; i < dijmk*OF_CMPLX; i++) {
                        bufk_r[i] = 0;
                }

                for (iL = 0; iL < nimgs; iL++) {
                        shift_bas(env_loc, env, Ls, iptrxyz, iL);
                        pbuf = bufL;
                        /* Only images passing the prescreen are computed and
                         * counted; the dgemm below contracts over jLcount
                         * columns, so skipped images cost nothing. */
                        jLcount = 0;
                        for (jL = 0; jL < nimgs; jL++) {
                                shift_bas(env_loc, env, Ls, jptrxyz, jL);
                                if ((*fprescreen)(shls, pbcopt, atm, bas, env_loc)) {
                                        for (ksh = msh0; ksh < msh1; ksh++) {
                                                shls[2] = ksh;
                                                if ((*intor)(pbuf, NULL, shls, atm, natm,
                                                             bas, nbas, env_loc,
                                                             cintopt, cache)) {
                                                        empty = 0;
                                                }
                                                dk = ao_loc[ksh+1] - ao_loc[ksh];
                                                pbuf += dij*dk * comp;
                                        }
                                        // ('k,kL->kL', conj(expkL[iL]), expkL)
                                        for (i = 0; i < nkpts; i++) {
                                                bufexp_r[i*nimgs+jLcount] = expkL_r[i*nimgs+jL] * expkL_r[i*nimgs+iL];
                                                bufexp_r[i*nimgs+jLcount]+= expkL_i[i*nimgs+jL] * expkL_i[i*nimgs+iL];
                                                bufexp_i[i*nimgs+jLcount] = expkL_i[i*nimgs+jL] * expkL_r[i*nimgs+iL];
                                                bufexp_i[i*nimgs+jLcount]-= expkL_r[i*nimgs+jL] * expkL_i[i*nimgs+iL];
                                        }
                                        jLcount++;
                                }
                        }
                        dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &jLcount,
                               &D1, bufL, &dijmc, bufexp_r, &nimgs,
                               &D1, bufk_r, &dijmc);
                        dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &jLcount,
                               &D1, bufL, &dijmc, bufexp_i, &nimgs,
                               &D1, bufk_i, &dijmc);
                } // iL in range(0, nimgs)

                (*fsort)(out, bufk_r, bufk_i, shls_slice, ao_loc,
                         nkpts, comp, ish, jsh, msh0, msh1);
        }
}

/* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */
/* s1 entry point for the single-k fill. */
void PBCnr3c_fill_ks1(int (*intor)(), double complex *out, int nkpts_ij,
                      int nkpts, int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, PBCOpt *pbcopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        _nr3c_fill_k(intor, sort3c_ks1, out,
                     nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                     buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                     shls_slice, ao_loc, cintopt, pbcopt,
                     atm, natm, bas, nbas, env);
}

/* Sort for s2 (triangular ij) output, ish > jsh case.  Output rows are
 * indexed by the packed lower-triangular (i,j) composite index. */
static void sort3c_ks2_igtj(double complex *out, double *bufr, double *bufi,
                            int *shls_slice, int *ao_loc, int nkpts, int comp,
                            int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const int dkmax = ao_loc[msh1] - ao_loc[msh0];
        const size_t dijmc = dij * dkmax * comp;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
        int i, j, k, ij, kk, ksh, ic, dk, dijk;
        size_t off;
        double *pbr, *pbi;
        double complex *pout;

        for (kk = 0; kk < nkpts; kk++) {
                off = kk * dijmc;
                for (ksh = msh0; ksh < msh1; ksh++) {
                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                        dijk = dij * dk;
                        for (ic = 0; ic < comp; ic++) {
                                pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                                pbr = bufr + off + dijk*ic;
                                pbi = bufi + off + dijk*ic;
                                for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                ij = j * di + i;
                                                for (k = 0; k < dk; k++) {
                                                        pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I;
                                                }
                                        }
                                        /* Row stride grows with the global AO
                                         * index in the triangular layout. */
                                        pout += (i+ao_loc[ish]+1) * naok;
                                }
                        }
                        off += dijk * comp;
                }
                out += nijk * comp;
        }
}

/* Sort for s2 output, ish == jsh case: only the j <= i triangle is written. */
static void sort3c_ks2_ieqj(double complex *out, double *bufr, double *bufi,
                            int *shls_slice, int *ao_loc, int nkpts, int comp,
                            int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const int dkmax = ao_loc[msh1] - ao_loc[msh0];
        const size_t dijmc = dij * dkmax * comp;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
        int i, j, k, ij, kk, ksh, ic, dk, dijk;
        size_t off;
        double *pbr, *pbi;
        double complex *pout;

        for (kk = 0; kk < nkpts; kk++) {
                off = kk * dijmc;
                for (ksh = msh0; ksh < msh1; ksh++) {
                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                        dijk = dij * dk;
                        for (ic = 0; ic < comp; ic++) {
                                pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                                pbr = bufr + off + dijk*ic;
                                pbi = bufi + off + dijk*ic;
                                for (i = 0; i < di; i++) {
                                        for (j = 0; j <= i; j++) {
                                                ij = j * di + i;
                                                for (k = 0; k < dk; k++) {
                                                        pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I;
                                                }
                                        }
                                        pout += (i+ao_loc[ish]+1) * naok;
                                }
                        }
                        off += dijk * comp;
                }
                out += nijk * comp;
        }
}

/* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */
/* s2 entry point for the single-k fill; mirrors PBCnr3c_fill_kks2. */
void PBCnr3c_fill_ks2(int (*intor)(), double complex *out, int nkpts_ij,
                      int nkpts, int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, PBCOpt *pbcopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip > jp) {
                _nr3c_fill_k(intor, &sort3c_ks2_igtj, out,
                             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                             shls_slice, ao_loc, cintopt, pbcopt,
                             atm, natm, bas, nbas, env);
        } else if (ip == jp) {
                _nr3c_fill_k(intor, &sort3c_ks2_ieqj, out,
                             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                             shls_slice, ao_loc, cintopt, pbcopt,
                             atm, natm, bas, nbas, env);
        }
}

/* Gamma-point (real-valued) s1 sort: plain transpose scatter, no phases. */
static void sort3c_gs1(double *out, double *in, int *shls_slice, int *ao_loc,
                       int comp, int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t njk = naoj * naok;
        const size_t nijk = njk * naoi;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dij = di * dj;
        const int dkmax = ao_loc[msh1] - ao_loc[msh0];
        out += (ip * naoj + jp) * naok;
        int i, j, k, ksh, ic, dk, dijk;
        double *pin, *pout;

        for (ksh = msh0; ksh < msh1; ksh++) {
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dijk = dij * dk;
                for (ic = 0; ic < comp; ic++) {
                        pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
                        pin = in + dijk * ic;
                        for (j = 0; j < dj; j++) {
                                for (i = 0; i < di; i++) {
                                for (k = 0; k < dk; k++) {
                                        pout[i*njk+k] = pin[k*dij+i];
                                } }
                                pout += naok;
                                pin += di;
                        }
                }
                in += dijk * comp;
        }
}

/* Gamma-point fill: sums raw real integrals over both image indices
 * directly (no phase contraction, no BLAS needed). */
static void _nr3c_fill_g(int (*intor)(), void (*fsort)(), double *out,
                         int nkpts_ij, int nkpts, int comp, int nimgs,
                         int ish, int jsh,
                         double *buf, double *env_loc, double *Ls,
                         double *expkL_r, double *expkL_i, int *kptij_idx,
                         int *shls_slice, int *ao_loc,
                         CINTOpt *cintopt, PBCOpt *pbcopt,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];

        jsh += jsh0;
        ish += ish0;
        int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        int dkmax = INTBUFMAX10 / dij / 2 * MIN(IMGBLK,nimgs);
        int kshloc[ksh1-ksh0+1];
        int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
        int i, m, msh0, msh1, dijm;
        int ksh, dk, iL, jL, dijkc;
        int shls[3];
        int dijmc = dij * dkmax * comp;
        /* buf itself receives one shell-triple of raw integrals; bufL holds
         * the running image-summed accumulator. */
        double *bufL = buf + dijmc;
        double *cache = bufL + dijmc;
        double *pbuf;
        int (*fprescreen)();
        if (pbcopt != NULL) {
                fprescreen = pbcopt->fprescreen;
        } else {
                fprescreen = PBCnoscreen;
        }

        shls[0] = ish;
        shls[1] = jsh;
        for (m = 0; m < nkshloc; m++) {
                msh0 = kshloc[m];
                msh1 = kshloc[m+1];
                dkmax = ao_loc[msh1] - ao_loc[msh0];
                dijm = dij * dkmax;
                dijmc = dijm * comp;
                for (i = 0; i < dijmc; i++) {
                        bufL[i] = 0;
                }

                for (iL = 0; iL < nimgs; iL++) {
                        shift_bas(env_loc, env, Ls, iptrxyz, iL);
                        for (jL = 0; jL < nimgs; jL++) {
                                shift_bas(env_loc, env, Ls, jptrxyz, jL);
                                if ((*fprescreen)(shls, pbcopt, atm, bas, env_loc)) {
                                        pbuf = bufL;
                                        for (ksh = msh0; ksh < msh1; ksh++) {
                                                shls[2] = ksh;
                                                dk = ao_loc[ksh+1] - ao_loc[ksh];
                                                dijkc = dij*dk * comp;
                                                if ((*intor)(buf, NULL, shls, atm, natm,
                                                             bas, nbas, env_loc,
                                                             cintopt, cache)) {
                                                        for (i = 0; i < dijkc; i++) {
                                                                pbuf[i] += buf[i];
                                                        }
                                                }
                                                pbuf += dijkc;
                                        }
                                }
                        }
                } // iL in range(0, nimgs)

                (*fsort)(out, bufL, shls_slice, ao_loc, comp,
                         ish, jsh, msh0, msh1);
        }
}

/*
 ('...LM->...', int3c) */
/* s1 entry point for the gamma-point fill. */
void PBCnr3c_fill_gs1(int (*intor)(), double *out, int nkpts_ij,
                      int nkpts, int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, PBCOpt *pbcopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        _nr3c_fill_g(intor, &sort3c_gs1, out,
                     nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                     buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                     shls_slice, ao_loc, cintopt, pbcopt,
                     atm, natm, bas, nbas, env);
}

/* Gamma-point s2 sort, ish > jsh: scatter into the packed triangular
 * (i,j) composite row index. */
static void sort3c_gs2_igtj(double *out, double *in, int *shls_slice,
                            int *ao_loc, int comp,
                            int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
        int i, j, k, ij, ksh, ic, dk, dijk;
        double *pin, *pout;

        for (ksh = msh0; ksh < msh1; ksh++) {
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dijk = dij * dk;
                for (ic = 0; ic < comp; ic++) {
                        pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
                        pin = in + dijk * ic;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        ij = j * di + i;
                                        for (k = 0; k < dk; k++) {
                                                pout[j*naok+k] = pin[k*dij+ij];
                                        }
                                }
                                /* Triangular layout: row stride grows with the
                                 * global AO index. */
                                pout += (i+ao_loc[ish]+1) * naok;
                        }
                }
                in += dijk * comp;
        }
}

/* Gamma-point s2 sort, ish == jsh: only the j <= i triangle is written.
 * Note dij = di*di since the i and j shells are the same shell. */
static void sort3c_gs2_ieqj(double *out, double *in, int *shls_slice,
                            int *ao_loc, int comp,
                            int ish, int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dij = di * di;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
        int i, j, k, ij, ksh, ic, dk, dijk;
        double *pin, *pout;

        for (ksh = msh0; ksh < msh1; ksh++) {
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dijk = dij * dk;
                for (ic = 0; ic < comp; ic++) {
                        pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
                        pin = in + dijk * ic;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        ij = j * di + i;
                                        for (k = 0; k < dk; k++) {
                                                pout[j*naok+k] = pin[k*dij+ij];
                                        }
                                }
                                pout += (i+ao_loc[ish]+1) * naok;
                        }
                }
                in += dijk * comp;
        }
}

/* ('...LM->...', int3c) */
/* s2 entry point for the gamma-point fill; dispatches like the k-point
 * s2 fills above (i < j handled by symmetry, hence skipped). */
void PBCnr3c_fill_gs2(int (*intor)(), double *out, int nkpts_ij,
                      int nkpts, int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, PBCOpt *pbcopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip > jp) {
                _nr3c_fill_g(intor, &sort3c_gs2_igtj, out,
                             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                             shls_slice, ao_loc, cintopt, pbcopt,
                             atm, natm, bas, nbas, env);
        } else if (ip == jp) {
                _nr3c_fill_g(intor, &sort3c_gs2_ieqj, out,
                             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                             shls_slice, ao_loc, cintopt, pbcopt,
                             atm, natm, bas, nbas, env);
        }
}

/* Upper bound (highest env index touched + extent) of the env[] region
 * referenced by the shells in shls_slice[0:2] — coordinates, exponents and
 * contraction coefficients.  Used to size per-thread env copies. */
int PBCsizeof_env(int *shls_slice,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        int ish, ia, np, nc;
        int nenv = 0;
        for (ish = ish0; ish < ish1; ish++) {
                ia = bas[ATOM_OF +ish*BAS_SLOTS];
                nenv = MAX(atm[PTR_COORD+ia*ATM_SLOTS]+3, nenv);
                np = bas[NPRIM_OF+ish*BAS_SLOTS];
                nc = bas[NCTR_OF +ish*BAS_SLOTS];
                nenv = MAX(bas[PTR_EXP +ish*BAS_SLOTS]+np, nenv);
                nenv = MAX(bas[PTR_COEFF+ish*BAS_SLOTS]+np*nc, nenv);
        }
        return nenv;
}

/* Driver for the 3-center fills: splits expkL into real/imag parts, sizes
 * the per-thread workspace according to the chosen fill routine, and runs
 * the (ish, jsh) shell-pair loop in parallel.  Each thread gets a private
 * copy of env (shift_bas mutates coordinates in place) and its own buf. */
void PBCnr3c_drv(int (*intor)(), void (*fill)(), double complex *eri,
                 int nkpts_ij, int nkpts, int comp, int nimgs,
                 double *Ls, double complex *expkL, int *kptij_idx,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt, PBCOpt *pbcopt,
                 int *atm, int natm, int *bas, int nbas, double *env,
                 int nenv)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        /* One allocation holds both the real and the imaginary parts. */
        double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
        double *expkL_i = expkL_r + nimgs*nkpts;
        int i;
        for (i = 0; i < nimgs*nkpts; i++) {
                expkL_r[i] = creal(expkL[i]);
                expkL_i[i] = cimag(expkL[i]);
        }

        /* Workspace size depends on which fill variant is used; the kk
         * variants need the larger, k-pair-resolved accumulators. */
        size_t count;
        if (fill == &PBCnr3c_fill_kks1 || fill == &PBCnr3c_fill_kks2) {
                int dijk =(GTOmax_shell_dim(ao_loc, shls_slice+0, 1) *
                           GTOmax_shell_dim(ao_loc, shls_slice+2, 1) *
                           GTOmax_shell_dim(ao_loc, shls_slice+4, 1));
                count = nkpts*nkpts * OF_CMPLX +
                        nkpts*MIN(nimgs,IMGBLK) * OF_CMPLX + nimgs;
// MAX(INTBUFMAX, dijk) to ensure buffer is enough for at least one (i,j,k) shell
                count*= MAX(INTBUFMAX, dijk) * comp;
        } else {
                count = (nkpts * OF_CMPLX + nimgs) * INTBUFMAX10 * comp;
                count+= nimgs * nkpts * OF_CMPLX;
        }
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                                 atm, natm, bas, nbas, env);

#pragma omp parallel default(none) \
        shared(intor, fill, eri, nkpts_ij, nkpts, comp, nimgs, \
               Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, pbcopt, \
               atm, natm, bas, nbas, env, nenv, count)
{
        int ish, jsh, ij;
        /* Thread-private env copy: shift_bas writes shifted coordinates. */
        double *env_loc = malloc(sizeof(double)*nenv);
        memcpy(env_loc, env, sizeof(double)*nenv);
        double *buf = malloc(sizeof(double)*(count+cache_size));
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                        buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                        shls_slice, ao_loc, cintopt, pbcopt,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
        free(env_loc);
}
        /* expkL_i points into the same allocation; one free releases both. */
        free(expkL_r);
}

/* Scatter a 2-center (i,j) block of real/imag buffers into the complex
 * k-resolved output. */
static void sort2c_ks1(double complex *out, double *bufr, double *bufi,
                       int *shls_slice, int *ao_loc, int nkpts, int comp,
                       int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nij = naoi * naoj;
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dimax = ao_loc[msh1] - ao_loc[msh0];
        const size_t dmjc = dimax * dj * comp;
        out += jp;
        int i, j, kk, ish, ic, di, dij;
        size_t off;
        double *pbr, *pbi;
        double complex *pout;

        for (kk = 0; kk < nkpts; kk++) {
                off = kk * dmjc;
                for (ish = msh0; ish < msh1; ish++) {
                        di = ao_loc[ish+1] - ao_loc[ish];
                        dij = di * dj;
                        for (ic = 0; ic < comp; ic++) {
                                pout = out + nij*ic + naoj*(ao_loc[ish]-ao_loc[ish0]);
                                pbr = bufr + off + dij*ic;
                                pbi = bufi + off + dij*ic;
                                for (j = 0; j < dj; j++) {
                                for (i = 0; i < di; i++) {
                                        pout[i*naoj+j] = pbr[j*di+i] + pbi[j*di+i]*_Complex_I;
                                } }
                        }
                        off += dij * comp;
                }
                out += nij * comp;
        }
}

/* 2-center fill for one j shell: loops i shells in blocks (INTBUFMAX10),
 * sums raw integrals over the j-shell lattice images with the expkL phase
 * via dgemm, then sorts into the complex output.  ish0 is the caller's
 * starting i-shell offset (used by the ks2 wrapper to skip i < j). */
static void _nr2c_fill(int (*intor)(), double complex *out,
                       int nkpts, int comp, int nimgs, int jsh, int ish0,
                       double *buf, double *env_loc, double *Ls,
                       double *expkL_r, double *expkL_i,
                       int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt, PBCOpt *pbcopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const char TRANS_N = 'N';
        const double D1 = 1;
        const double D0 = 0;

        ish0 += shls_slice[0];
        jsh += jsh0;
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dimax = INTBUFMAX10 / dj;
        int ishloc[ish1-ish0+1];
        int nishloc = shloc_partition(ishloc, ao_loc, ish0, ish1, dimax);
        int m, msh0, msh1, dmjc, ish, di, empty;
        int jL;
        int shls[2];
        double *bufk_r = buf;
        double *bufk_i, *bufL, *pbuf, *cache;

        shls[1] = jsh;
        for (m = 0; m < nishloc; m++) {
                msh0 = ishloc[m];
                msh1 = ishloc[m+1];
                dimax = ao_loc[msh1] - ao_loc[msh0];
                dmjc = dj * dimax * comp;
                bufk_i = bufk_r + dmjc * nkpts;
                bufL = bufk_i + dmjc * nkpts;
                cache = bufL + dmjc * nimgs;

                pbuf = bufL;
                for (jL = 0; jL < nimgs; jL++) {
                        shift_bas(env_loc, env, Ls, jptrxyz, jL);
                        for (ish = msh0; ish < msh1; ish++) {
                                shls[0] = ish;
                                di = ao_loc[ish+1] - ao_loc[ish];
                                if ((*intor)(pbuf, NULL, shls, atm, natm,
                                             bas, nbas, env_loc,
                                             cintopt, cache)) {
                                        empty = 0;
                                }
                                pbuf += di * dj * comp;
                        }
                }
                dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
                       &D1, bufL, &dmjc, expkL_r, &nimgs,
                       &D0, bufk_r, &dmjc);
                dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
                       &D1, bufL, &dmjc, expkL_i, &nimgs,
                       &D0, bufk_i, &dmjc);

                sort2c_ks1(out, bufk_r, bufk_i, shls_slice, ao_loc,
                           nkpts, comp, jsh, msh0, msh1);
        }
}

/* ('...M,kL->...k', int3c, exp_kL, exp_kL) */
/* s1 2-center entry point: all i shells for this j shell. */
void PBCnr2c_fill_ks1(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, PBCOpt *pbcopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        _nr2c_fill(intor, out, nkpts, comp, nimgs, jsh, 0,
                   buf, env_loc, Ls, expkL_r, expkL_i,
                   shls_slice, ao_loc, cintopt, pbcopt,
                   atm, natm, bas, nbas, env);
}

/* s2 2-center entry point: starts the i-shell loop at jsh (triangle). */
void PBCnr2c_fill_ks2(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, PBCOpt *pbcopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        _nr2c_fill(intor, out, nkpts, comp, nimgs, jsh, jsh,
                   buf, env_loc, Ls, expkL_r, expkL_i,
                   shls_slice, ao_loc, cintopt, pbcopt,
                   atm, natm, bas, nbas, env);
}

/* Driver for the 2-center fills; same structure as PBCnr3c_drv but the
 * parallel loop runs over j shells only. */
void PBCnr2c_drv(int (*intor)(), void (*fill)(), double complex *out,
                 int nkpts, int comp, int nimgs,
                 double *Ls, double complex *expkL,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt, PBCOpt *pbcopt,
                 int *atm, int natm, int *bas, int nbas, double *env,
                 int nenv)
{
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int njsh = jsh1 - jsh0;
        double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
        double *expkL_i = expkL_r + nimgs*nkpts;
        int i;
        for (i = 0; i < nimgs*nkpts; i++) {
                expkL_r[i] = creal(expkL[i]);
                expkL_i[i] = cimag(expkL[i]);
        }
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 2,
                                                 atm, natm, bas, nbas, env);

#pragma omp parallel default(none) \
        shared(intor, fill, out, nkpts, comp, nimgs, \
               Ls, expkL_r, expkL_i, shls_slice, ao_loc, cintopt, pbcopt, \
               atm, natm, bas, nbas, env, nenv)
{
        int jsh;
        double *env_loc = malloc(sizeof(double)*nenv);
        memcpy(env_loc, env, sizeof(double)*nenv);
        size_t count = nkpts * OF_CMPLX + nimgs;
        double *buf = malloc(sizeof(double)*(count*INTBUFMAX10*comp+cache_size));
#pragma omp for schedule(dynamic)
        for (jsh = 0; jsh < njsh; jsh++) {
                (*fill)(intor, out, nkpts, comp, nimgs, jsh,
                        buf, env_loc, Ls, expkL_r, expkL_i,
                        shls_slice, ao_loc, cintopt, pbcopt,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
        free(env_loc);
}
        /* expkL_i shares the allocation with expkL_r. */
        free(expkL_r);
}