source
stringlengths
3
92
c
stringlengths
26
2.25M
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-126,128)),ceild(3*t1-254,256)),ceild(24*t2-Nz-1011,1024)),ceild(4*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(12*t1+Nx+15,1024)),floord(24*t2+Nx+11,1024)),floord(4*t3+Nx-9,1024)),floord(24*t1-24*t2+Nz+Nx+13,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),256*t4+254);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ 
(-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_Global.c
//------------------------------------------------------------------------------ // GB_Global: global values in GraphBLAS //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // All Global storage is declared, initialized, and accessed here. The // contents of the GB_Global struct are only accessible to functions in this // file. Global storage is used to keep track of the GraphBLAS mode (blocking // or non-blocking), for pointers to malloc/calloc/realloc/free functions, // global matrix options, and other settings. #include "GB_atomics.h" //------------------------------------------------------------------------------ // Global storage: for all threads in a user application that uses GraphBLAS //------------------------------------------------------------------------------ typedef struct { //-------------------------------------------------------------------------- // blocking/non-blocking mode, set by GrB_init //-------------------------------------------------------------------------- GrB_Mode mode ; // GrB_NONBLOCKING or GrB_BLOCKING bool GrB_init_called ; // true if GrB_init already called //-------------------------------------------------------------------------- // threading control //-------------------------------------------------------------------------- int nthreads_max ; // max number of threads to use double chunk ; // chunk size for determining # threads to use //-------------------------------------------------------------------------- // hypersparsity and CSR/CSC format control //-------------------------------------------------------------------------- float bitmap_switch [GxB_NBITMAP_SWITCH] ; // default bitmap_switch float hyper_switch ; // default hyper_switch for new matrices bool is_csc ; // default CSR/CSC format for 
new matrices //-------------------------------------------------------------------------- // abort function: only used for debugging //-------------------------------------------------------------------------- void (* abort_function ) (void) ; //-------------------------------------------------------------------------- // malloc/calloc/realloc/free: memory management functions //-------------------------------------------------------------------------- // All threads must use the same malloc/calloc/realloc/free functions. // They default to the ANSI C11 functions, but can be defined by GxB_init. void * (* malloc_function ) (size_t) ; // required // void * (* calloc_function ) (size_t, size_t) ; // no longer used void * (* realloc_function ) (void *, size_t) ; // may be NULL void (* free_function ) (void *) ; // required bool malloc_is_thread_safe ; // default is true //-------------------------------------------------------------------------- // memory usage tracking: for testing and debugging only //-------------------------------------------------------------------------- // malloc_tracking: default is false. There is no user-accessible API for // setting this to true. If true, the following statistics are computed. // If false, all of the following are unused. // nmalloc: To aid in searching for memory leaks, GraphBLAS keeps track of // the number of blocks of allocated that have not yet been freed. The // count starts at zero. GB_malloc_memory and GB_calloc_memory increment // this count, and free (of a non-NULL pointer) decrements it. realloc // increments the count it if is allocating a new block, but it does this // by calling GB_malloc_memory. // malloc_debug: this is used for testing only (GraphBLAS/Tcov). If true, // then use malloc_debug_count for testing memory allocation and // out-of-memory conditions. If malloc_debug_count > 0, the value is // decremented after each allocation of memory. 
If malloc_debug_count <= // 0, the GB_*_memory routines pretend to fail; returning NULL and not // allocating anything. bool malloc_tracking ; // true if allocations are being tracked int64_t nmalloc ; // number of blocks allocated but not freed bool malloc_debug ; // if true, test memory handling int64_t malloc_debug_count ; // for testing memory handling //-------------------------------------------------------------------------- // for testing and development //-------------------------------------------------------------------------- int64_t hack [2] ; // settings for testing/developement only //-------------------------------------------------------------------------- // diagnostic output //-------------------------------------------------------------------------- bool burble ; // controls GBURBLE output GB_printf_function_t printf_func ; // pointer to printf GB_flush_function_t flush_func ; // pointer to flush //-------------------------------------------------------------------------- // for MATLAB interface only //-------------------------------------------------------------------------- bool print_one_based ; // if true, print 1-based indices //-------------------------------------------------------------------------- // timing: for code development only //-------------------------------------------------------------------------- double timing [40] ; //-------------------------------------------------------------------------- // for malloc debugging only //-------------------------------------------------------------------------- #ifdef GB_DEBUG #define GB_MEMTABLE_SIZE 10000 GB_void *memtable_p [GB_MEMTABLE_SIZE] ; size_t memtable_s [GB_MEMTABLE_SIZE] ; #endif int nmemtable ; //-------------------------------------------------------------------------- // internal memory pool //-------------------------------------------------------------------------- // free_pool [k] is a pointer to a link list of freed blocks, all of size // exactly equal to 2^k. 
The total number of blocks in the kth pool is // given by free_pool_nblocks [k], and the upper bound on this is given by // free_pool_limit [k]. If any additional blocks of size 2^k above that // limit are freed by GB_dealloc_memory, they are not placed in the pool, // but actually freed instead. void *free_pool [64] ; int64_t free_pool_nblocks [64] ; int64_t free_pool_limit [64] ; //-------------------------------------------------------------------------- // CUDA (DRAFT: in progress) //-------------------------------------------------------------------------- int gpu_count ; // # of GPUs in the system GrB_Desc_Value gpu_control ; // always, never, or default double gpu_chunk ; // min problem size for using a GPU // properties of each GPU: GB_cuda_device gpu_properties [GB_CUDA_MAX_GPUS] ; } GB_Global_struct ; GB_PUBLIC GB_Global_struct GB_Global ; GB_Global_struct GB_Global = { // GraphBLAS mode .mode = GrB_NONBLOCKING, // default is nonblocking // initialization flag .GrB_init_called = false, // GrB_init has not yet been called // max number of threads and chunk size .nthreads_max = 1, .chunk = GB_CHUNK_DEFAULT, // min dimension density #define GB_BITSWITCH_1 ((float) 0.04) #define GB_BITSWITCH_2 ((float) 0.05) #define GB_BITSWITCH_3_to_4 ((float) 0.06) #define GB_BITSWITCH_5_to_8 ((float) 0.08) #define GB_BITSWITCH_9_to_16 ((float) 0.10) #define GB_BITSWITCH_17_to_32 ((float) 0.20) #define GB_BITSWITCH_33_to_64 ((float) 0.30) #define GB_BITSWITCH_gt_than_64 ((float) 0.40) // default format .bitmap_switch = { GB_BITSWITCH_1, GB_BITSWITCH_2, GB_BITSWITCH_3_to_4, GB_BITSWITCH_5_to_8, GB_BITSWITCH_9_to_16, GB_BITSWITCH_17_to_32, GB_BITSWITCH_33_to_64, GB_BITSWITCH_gt_than_64 }, .hyper_switch = GB_HYPER_SWITCH_DEFAULT, .is_csc = (GB_FORMAT_DEFAULT != GxB_BY_ROW), // default is GxB_BY_ROW // abort function for debugging only .abort_function = abort, // malloc/calloc/realloc/free functions: default to ANSI C11 functions .malloc_function = malloc, // .calloc_function = 
NULL, // no longer used .realloc_function = realloc, .free_function = free, .malloc_is_thread_safe = true, // malloc tracking, for testing, statistics, and debugging only .malloc_tracking = false, .nmalloc = 0, // memory block counter .malloc_debug = false, // do not test memory handling .malloc_debug_count = 0, // counter for testing memory handling // for testing and development only .hack = {0, 0}, // diagnostics .burble = false, .printf_func = NULL, .flush_func = NULL, // for MATLAB interface only .print_one_based = false, // if true, print 1-based indices .timing = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // for malloc debugging only .nmemtable = 0, // memtable is empty // all free_pool lists start out empty .free_pool = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }, .free_pool_nblocks = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // default limits on the number of free blocks in each list: .free_pool_limit = { 0, // size 2^0 = 1 byte none 0, // size 2^1 = 2 none 0, // size 2^2 = 4 none 16483, // size 2^3 = 8 (2^14 blocks * 2^3 = 128 KB total) 16483, // size 2^4 = 16 bytes (2^14 blocks * 2^4 = 256 KB total) 16483, // size 2^5 = 32 (2^14 blocks * 2^5 = 512 KB total) 16483, // size 2^6 = 64 (2^14 blocks * 2^6 = 1 MB total) 16483, // size 2^7 = 128 (2^14 blocks * 2^7 = 2 MB total) 16483, // size 2^8 = 256 (2^14 blocks * 2^8 = 4 MB total) 8192, // size 2^9 = 512 (2^13 blocks * 2^9 = 4 MB total) 
4096, // size 2^10 = 1 KB (2^12 blocks * 2^10 = 4 MB total) 2048, // size 2^11 = 2 KB (2^11 blocks * 2^11 = 4 MB total) 1024, // size 2^12 = 4 KB (2^10 blocks * 2^12 = 4 MB total) 512, // size 2^13 = 8 KB (2^9 blocks * 2^13 = 4 MB total) 256, // size 2^14 = 16 KB (2^8 blocks * 2^14 = 4 MB total) 128, // size 2^15 = 32 KB (2^7 blocks * 2^15 = 4 MB total) 64, // size 2^16 = 64 KB (2^6 blocks * 2^16 = 4 MB total) 32, // size 2^17 = 128 KB (2^5 blocks * 2^17 = 4 MB total) 16, // size 2^18 = 256 KB (2^4 blocks * 2^18 = 4 MB total) 8, // size 2^19 = 512 KB (2^3 blocks * 2^19 = 4 MB total) // maximum total size = about 52 MB // by default, no blocks larger than 512 KB are kept in the free_pool 0, // size 2^20 = 1 MB 0, // size 2^21 0, // size 2^22 0, // size 2^23 0, // size 2^24 0, // size 2^25 0, // size 2^26 0, // size 2^27 0, // size 2^28 0, // size 2^29 0, // size 2^30 (1 GB) 0, // size 2^31 0, // size 2^32 0, // size 2^33 0, // size 2^34 0, // size 2^35 0, // size 2^36 0, // size 2^37 0, // size 2^38 0, // size 2^39 // These larger sizes are of course unlikely to appear, but adding all // 64 possibilities means that the free_pool does not need to check an // upper bound. 0, // size 2^40 (1 TB) 0, // size 2^41 0, // size 2^42 0, // size 2^43 0, // size 2^44 0, // size 2^45 0, // size 2^46 0, // size 2^47 0, // size 2^48 0, // size 2^49 0, // size 2^50 (1 PB) 0, // size 2^51 0, // size 2^52 0, // size 2^53 0, // size 2^54 0, // size 2^55 0, // size 2^56 0, // size 2^57 0, // size 2^58 0, // size 2^59 0, // size 2^60 (1 exabyte) 0, // size 2^61 0, // size 2^62 0 }, // size 2^63 (4 exabytes!) 
// CUDA environment (DRAFT: in progress) .gpu_count = 0, // # of GPUs in the system .gpu_control = GxB_DEFAULT, // always, never, or default .gpu_chunk = GB_GPU_CHUNK_DEFAULT, // min problem size for using a GPU } ; //============================================================================== // GB_Global access functions //============================================================================== //------------------------------------------------------------------------------ // mode //------------------------------------------------------------------------------ void GB_Global_mode_set (GrB_Mode mode) { GB_Global.mode = mode ; } GrB_Mode GB_Global_mode_get (void) { return (GB_Global.mode) ; } //------------------------------------------------------------------------------ // GrB_init_called //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_GrB_init_called_set (bool GrB_init_called) { GB_Global.GrB_init_called = GrB_init_called ; } GB_PUBLIC bool GB_Global_GrB_init_called_get (void) { return (GB_Global.GrB_init_called) ; } //------------------------------------------------------------------------------ // nthreads_max //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_nthreads_max_set (int nthreads_max) { GB_Global.nthreads_max = GB_IMAX (nthreads_max, 1) ; } GB_PUBLIC int GB_Global_nthreads_max_get (void) { return (GB_Global.nthreads_max) ; } //------------------------------------------------------------------------------ // OpenMP max_threads //------------------------------------------------------------------------------ GB_PUBLIC int GB_Global_omp_get_max_threads (void) { return (GB_OPENMP_MAX_THREADS) ; } //------------------------------------------------------------------------------ // chunk //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_chunk_set (double chunk) { if (chunk <= 
GxB_DEFAULT) chunk = GB_CHUNK_DEFAULT ; GB_Global.chunk = fmax (chunk, 1) ; } GB_PUBLIC double GB_Global_chunk_get (void) { return (GB_Global.chunk) ; } //------------------------------------------------------------------------------ // hyper_switch //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_hyper_switch_set (float hyper_switch) { GB_Global.hyper_switch = hyper_switch ; } GB_PUBLIC float GB_Global_hyper_switch_get (void) { return (GB_Global.hyper_switch) ; } //------------------------------------------------------------------------------ // bitmap_switch //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_bitmap_switch_set (int k, float b) { k = GB_IMAX (k, 0) ; k = GB_IMIN (k, 7) ; GB_Global.bitmap_switch [k] = b ; } GB_PUBLIC float GB_Global_bitmap_switch_get (int k) { k = GB_IMAX (k, 0) ; k = GB_IMIN (k, 7) ; return (GB_Global.bitmap_switch [k]) ; } GB_PUBLIC float GB_Global_bitmap_switch_matrix_get (int64_t vlen, int64_t vdim) { int64_t d = GB_IMIN (vlen, vdim) ; if (d <= 1) return (GB_Global.bitmap_switch [0]) ; if (d <= 2) return (GB_Global.bitmap_switch [1]) ; if (d <= 4) return (GB_Global.bitmap_switch [2]) ; if (d <= 8) return (GB_Global.bitmap_switch [3]) ; if (d <= 16) return (GB_Global.bitmap_switch [4]) ; if (d <= 32) return (GB_Global.bitmap_switch [5]) ; if (d <= 64) return (GB_Global.bitmap_switch [6]) ; return (GB_Global.bitmap_switch [7]) ; } GB_PUBLIC void GB_Global_bitmap_switch_default (void) { GB_Global.bitmap_switch [0] = GB_BITSWITCH_1 ; GB_Global.bitmap_switch [1] = GB_BITSWITCH_2 ; GB_Global.bitmap_switch [2] = GB_BITSWITCH_3_to_4 ; GB_Global.bitmap_switch [3] = GB_BITSWITCH_5_to_8 ; GB_Global.bitmap_switch [4] = GB_BITSWITCH_9_to_16 ; GB_Global.bitmap_switch [5] = GB_BITSWITCH_17_to_32 ; GB_Global.bitmap_switch [6] = GB_BITSWITCH_33_to_64 ; GB_Global.bitmap_switch [7] = GB_BITSWITCH_gt_than_64 ; } 
//------------------------------------------------------------------------------ // is_csc //------------------------------------------------------------------------------ void GB_Global_is_csc_set (bool is_csc) { GB_Global.is_csc = is_csc ; } bool GB_Global_is_csc_get (void) { return (GB_Global.is_csc) ; } //------------------------------------------------------------------------------ // abort_function //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_abort_function_set (void (* abort_function) (void)) { GB_Global.abort_function = abort_function ; } GB_PUBLIC void GB_Global_abort_function (void) { GB_Global.abort_function ( ) ; } //------------------------------------------------------------------------------ // malloc debuging //------------------------------------------------------------------------------ // These functions keep a separate record of the pointers to all allocated // blocks of memory and their sizes, just for sanity checks. 
GB_PUBLIC void GB_Global_memtable_dump (void) { #ifdef GB_DEBUG printf ("\nmemtable dump: %d nmalloc %ld\n", GB_Global.nmemtable, GB_Global.nmalloc) ; for (int k = 0 ; k < GB_Global.nmemtable ; k++) { printf (" %4d: %12p : %ld\n", k, GB_Global.memtable_p [k], GB_Global.memtable_s [k]) ; } #endif } GB_PUBLIC int GB_Global_memtable_n (void) { return (GB_Global.nmemtable) ; } GB_PUBLIC void GB_Global_memtable_clear (void) { GB_Global.nmemtable = 0 ; } // add a pointer to the table of malloc'd blocks GB_PUBLIC void GB_Global_memtable_add (void *p, size_t size) { #ifdef GB_DEBUG ASSERT ((p == NULL) == (size == 0)) ; if (p == NULL) return ; bool fail = false ; // printf ("memtable add %p size %ld\n", p, size) ; #pragma omp critical(GB_memtable) { int n = GB_Global.nmemtable ; fail = (n > GB_MEMTABLE_SIZE) ; if (!fail) { for (int i = 0 ; i < n ; i++) { if (p == GB_Global.memtable_p [i]) { printf ("\nadd duplicate %p size %ld\n", p, size) ; GB_Global_memtable_dump ( ) ; printf ("Hey %d %p\n", i,p) ; fail = true ; break ; } } } if (!fail && p != NULL) { GB_Global.memtable_p [n] = p ; GB_Global.memtable_s [n] = size ; GB_Global.nmemtable++ ; } } ASSERT (!fail) ; // GB_Global_memtable_dump ( ) ; #endif } // get the size of a malloc'd block GB_PUBLIC size_t GB_Global_memtable_size (void *p) { size_t size = 0 ; #ifdef GB_DEBUG if (p == NULL) return (0) ; bool found = false ; #pragma omp critical(GB_memtable) { int n = GB_Global.nmemtable ; for (int i = 0 ; i < n ; i++) { if (p == GB_Global.memtable_p [i]) { size = GB_Global.memtable_s [i] ; found = true ; break ; } } } if (!found) { printf ("\nFAIL: %p not found\n", p) ; GB_Global_memtable_dump ( ) ; ASSERT (0) ; } #endif return (size) ; } // test if a malloc'd block is in the table GB_PUBLIC bool GB_Global_memtable_find (void *p) { bool found = false ; #ifdef GB_DEBUG if (p == NULL) return (false) ; #pragma omp critical(GB_memtable) { int n = GB_Global.nmemtable ; for (int i = 0 ; i < n ; i++) { if (p == GB_Global.memtable_p 
[i]) { found = true ; break ; } } } #endif return (found) ; } // remove a pointer from the table of malloc'd blocks GB_PUBLIC void GB_Global_memtable_remove (void *p) { #ifdef GB_DEBUG if (p == NULL) return ; bool found = false ; // printf ("memtable remove %p ", p) ; #pragma omp critical(GB_memtable) { int n = GB_Global.nmemtable ; for (int i = 0 ; i < n ; i++) { if (p == GB_Global.memtable_p [i]) { // found p in the table; remove it // printf ("size %ld\n", GB_Global.memtable_s [i]) ; GB_Global.memtable_p [i] = GB_Global.memtable_p [n-1] ; GB_Global.memtable_s [i] = GB_Global.memtable_s [n-1] ; GB_Global.nmemtable -- ; found = true ; break ; } } } if (!found) { printf ("remove %p NOT FOUND\n", p) ; GB_Global_memtable_dump ( ) ; } ASSERT (found) ; // GB_Global_memtable_dump ( ) ; #endif } //------------------------------------------------------------------------------ // malloc_function //------------------------------------------------------------------------------ void GB_Global_malloc_function_set (void * (* malloc_function) (size_t)) { GB_Global.malloc_function = malloc_function ; } void * GB_Global_malloc_function (size_t size) { void *p = NULL ; if (GB_Global.malloc_is_thread_safe) { p = GB_Global.malloc_function (size) ; } else { #pragma omp critical(GB_malloc_protection) { p = GB_Global.malloc_function (size) ; } } #ifdef GB_DEBUG GB_Global_memtable_add (p, size) ; #endif return (p) ; } //------------------------------------------------------------------------------ // calloc_function: no longer used //------------------------------------------------------------------------------ // void GB_Global_calloc_function_set (void * (* calloc_function) (size_t, size_t)) // { // GB_Global.calloc_function = calloc_function ; // } // bool GB_Global_have_calloc_function (void) // { // return (GB_Global.calloc_function != NULL) ; // } // void * GB_Global_calloc_function (size_t count, size_t size) // { // void *p = NULL ; // if (GB_Global.malloc_is_thread_safe) // { // 
p = GB_Global.calloc_function (count, size) ; // } // else // { // #pragma omp critical(GB_malloc_protection) // { // p = GB_Global.calloc_function (count, size) ; // } // } // #ifdef GB_DEBUG // GB_Global_memtable_add (p, count * size) ; // #endif // return (p) ; // } //------------------------------------------------------------------------------ // realloc_function //------------------------------------------------------------------------------ void GB_Global_realloc_function_set ( void * (* realloc_function) (void *, size_t) ) { GB_Global.realloc_function = realloc_function ; } bool GB_Global_have_realloc_function (void) { return (GB_Global.realloc_function != NULL) ; } void * GB_Global_realloc_function (void *p, size_t size) { void *pnew = NULL ; if (GB_Global.malloc_is_thread_safe) { pnew = GB_Global.realloc_function (p, size) ; } else { #pragma omp critical(GB_malloc_protection) { pnew = GB_Global.realloc_function (p, size) ; } } #ifdef GB_DEBUG if (pnew != NULL) { GB_Global_memtable_remove (p) ; GB_Global_memtable_add (pnew, size) ; } #endif return (pnew) ; } //------------------------------------------------------------------------------ // free_function //------------------------------------------------------------------------------ void GB_Global_free_function_set (void (* free_function) (void *)) { GB_Global.free_function = free_function ; } void GB_Global_free_function (void *p) { if (GB_Global.malloc_is_thread_safe) { GB_Global.free_function (p) ; } else { #pragma omp critical(GB_malloc_protection) { GB_Global.free_function (p) ; } } #ifdef GB_DEBUG GB_Global_memtable_remove (p) ; #endif } //------------------------------------------------------------------------------ // malloc_is_thread_safe //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_malloc_is_thread_safe_set (bool malloc_is_thread_safe) { GB_Global.malloc_is_thread_safe = malloc_is_thread_safe ; } GB_PUBLIC bool 
GB_Global_malloc_is_thread_safe_get (void) { return (GB_Global.malloc_is_thread_safe) ; } //------------------------------------------------------------------------------ // malloc_tracking //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_malloc_tracking_set (bool malloc_tracking) { GB_Global.malloc_tracking = malloc_tracking ; } bool GB_Global_malloc_tracking_get (void) { return (GB_Global.malloc_tracking) ; } //------------------------------------------------------------------------------ // nmalloc //------------------------------------------------------------------------------ void GB_Global_nmalloc_clear (void) { GB_ATOMIC_WRITE GB_Global.nmalloc = 0 ; } GB_PUBLIC int64_t GB_Global_nmalloc_get (void) { int64_t nmalloc ; GB_ATOMIC_READ nmalloc = GB_Global.nmalloc ; return (nmalloc) ; } void GB_Global_nmalloc_increment (void) { GB_ATOMIC_UPDATE GB_Global.nmalloc++ ; } GB_PUBLIC void GB_Global_nmalloc_decrement (void) { GB_ATOMIC_UPDATE GB_Global.nmalloc-- ; } //------------------------------------------------------------------------------ // malloc_debug //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_malloc_debug_set (bool malloc_debug) { GB_ATOMIC_WRITE GB_Global.malloc_debug = malloc_debug ; } bool GB_Global_malloc_debug_get (void) { bool malloc_debug ; GB_ATOMIC_READ malloc_debug = GB_Global.malloc_debug ; return (malloc_debug) ; } //------------------------------------------------------------------------------ // malloc_debug_count //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_malloc_debug_count_set (int64_t malloc_debug_count) { GB_ATOMIC_WRITE GB_Global.malloc_debug_count = malloc_debug_count ; } bool GB_Global_malloc_debug_count_decrement (void) { GB_ATOMIC_UPDATE GB_Global.malloc_debug_count-- ; int64_t malloc_debug_count ; GB_ATOMIC_READ malloc_debug_count = 
GB_Global.malloc_debug_count ; return (malloc_debug_count <= 0) ; } //------------------------------------------------------------------------------ // hack: for setting an internal flag for testing and development only //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_hack_set (int k, int64_t hack) { GB_Global.hack [k] = hack ; } GB_PUBLIC int64_t GB_Global_hack_get (int k) { return (GB_Global.hack [k]) ; } //------------------------------------------------------------------------------ // burble: for controlling the burble output //------------------------------------------------------------------------------ void GB_Global_burble_set (bool burble) { GB_Global.burble = burble ; } GB_PUBLIC bool GB_Global_burble_get (void) { return (GB_Global.burble) ; } GB_PUBLIC GB_printf_function_t GB_Global_printf_get ( ) { return (GB_Global.printf_func) ; } GB_PUBLIC GB_flush_function_t GB_Global_flush_get ( ) { return (GB_Global.flush_func) ; } GB_PUBLIC void GB_Global_printf_set (GB_printf_function_t pr_func) { GB_Global.printf_func = pr_func ; } GB_PUBLIC void GB_Global_flush_set (GB_flush_function_t fl_func) { GB_Global.flush_func = fl_func ; } //------------------------------------------------------------------------------ // for MATLAB interface only //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_print_one_based_set (bool onebased) { GB_Global.print_one_based = onebased ; } GB_PUBLIC bool GB_Global_print_one_based_get (void) { return (GB_Global.print_one_based) ; } //------------------------------------------------------------------------------ // CUDA (DRAFT: in progress) //------------------------------------------------------------------------------ void GB_Global_gpu_control_set (GrB_Desc_Value gpu_control) { // set the GPU control to always, never, or default if (GB_Global.gpu_count > 0) { // one or more GPUs are available: set gpu_control to // 
always, never, or default. if (gpu_control == GxB_GPU_ALWAYS || gpu_control == GxB_GPU_NEVER) { GB_Global.gpu_control = gpu_control ; } else { GB_Global.gpu_control = GxB_DEFAULT ; } } else { // no GPUs available: never use a GPU GB_Global.gpu_control = GxB_GPU_NEVER ; } } GrB_Desc_Value GB_Global_gpu_control_get (void) { // get the GPU control parameter return (GB_Global.gpu_control) ; } void GB_Global_gpu_chunk_set (double gpu_chunk) { // set the GPU chunk factor if (gpu_chunk < 1) gpu_chunk = GB_GPU_CHUNK_DEFAULT ; GB_Global.gpu_chunk = gpu_chunk ; } double GB_Global_gpu_chunk_get (void) { // get the GPU chunk factor return (GB_Global.gpu_chunk) ; } bool GB_Global_gpu_count_set (bool enable_cuda) { // set the # of GPUs in the system; // this function is only called once, by GB_init. #if defined ( GBCUDA ) if (enable_cuda) { return (GB_cuda_get_device_count (&GB_Global.gpu_count)) ; } else #endif { // no GPUs available, or available but not requested GB_Global.gpu_count = 0 ; return (true) ; } } int GB_Global_gpu_count_get (void) { // get the # of GPUs in the system return (GB_Global.gpu_count) ; } #define GB_GPU_DEVICE_CHECK(error) \ if (device < 0 || device >= GB_Global.gpu_count) return (error) ; size_t GB_Global_gpu_memorysize_get (int device) { // get the memory size of a specific GPU GB_GPU_DEVICE_CHECK (0) ; // memory size zero if invalid GPU return (GB_Global.gpu_properties [device].total_global_memory) ; } int GB_Global_gpu_sm_get (int device) { // get the # of SMs in a specific GPU GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU return (GB_Global.gpu_properties [device].number_of_sms) ; } bool GB_Global_gpu_device_pool_size_set( int device, size_t size) { GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU GB_Global.gpu_properties [device].pool_size = (int) size ; return( true); } bool GB_Global_gpu_device_max_pool_size_set( int device, size_t size) { GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU GB_Global.gpu_properties[device].max_pool_size = (int) 
size ; return( true); } bool GB_Global_gpu_device_memory_resource_set( int device, void *resource) { GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU GB_Global.gpu_properties[device].memory_resource = resource; return( true); } void* GB_Global_gpu_device_memory_resource_get( int device ) { GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU return ( GB_Global.gpu_properties [device].memory_resource ) ; //NOTE: this returns a void*, needs to be cast to be used } bool GB_Global_gpu_device_properties_get (int device) { // get all properties of a specific GPU; // this function is only called once per GPU, by GB_init. GB_GPU_DEVICE_CHECK (false) ; // fail if invalid GPU #if defined ( GBCUDA ) return (GB_cuda_get_device_properties (device, &(GB_Global.gpu_properties [device]))) ; #else // if no GPUs exist, they cannot be queried return (false) ; #endif } //------------------------------------------------------------------------------ // timing: for code development only //------------------------------------------------------------------------------ GB_PUBLIC void GB_Global_timing_clear_all (void) { for (int k = 0 ; k < 40 ; k++) { GB_Global.timing [k] = 0 ; } } GB_PUBLIC void GB_Global_timing_clear (int k) { GB_Global.timing [k] = 0 ; } GB_PUBLIC void GB_Global_timing_set (int k, double t) { GB_Global.timing [k] = t ; } GB_PUBLIC void GB_Global_timing_add (int k, double t) { GB_Global.timing [k] += t ; } GB_PUBLIC double GB_Global_timing_get (int k) { return (GB_Global.timing [k]) ; } //------------------------------------------------------------------------------ // free_pool: fast access to free memory blocks //------------------------------------------------------------------------------ // each free block contains a pointer to the next free block. This requires // the free block to be at least 8 bytes in size. 
// Each free block stores the pointer to the next free block in its first
// 8 bytes, forming an intrusive singly-linked list per size class k
// (block size == 2^k bytes).
#define GB_NEXT(p) ((void **) p) [0]

// free_pool_init: initialize the free_pool
// Optionally empties all 64 pools, then installs the default per-pool block
// limits: pools 3..8 hold up to 16384 blocks; pools 9..19 halve the limit at
// each step; all other pools are disabled (limit 0).

GB_PUBLIC
void GB_Global_free_pool_init (bool clear)
{
    #pragma omp critical(GB_free_pool)
    {
        if (clear)
        {
            // clear the free pool
            for (int k = 0 ; k < 64 ; k++)
            {
                GB_Global.free_pool [k] = NULL ;
                GB_Global.free_pool_nblocks [k] = 0 ;
            }
        }
        // set the default free_pool_limit
        for (int k = 0 ; k < 64 ; k++)
        {
            GB_Global.free_pool_limit [k] = 0 ;
        }
        int64_t n = 16384 ;
        for (int k = 3 ; k <= 8 ; k++)
        {
            GB_Global.free_pool_limit [k] = n ;
        }
        for (int k = 9 ; k <= 19 ; k++)
        {
            n = n/2 ;
            GB_Global.free_pool_limit [k] = n ;
        }
    }
}

#ifdef GB_DEBUG
// check if a block is valid
// Debug-only: asserts that p is a live tracked allocation whose recorded
// size matches its size class (2^k).  `where` identifies the caller.
static inline void GB_Global_free_pool_check (void *p, int k, char *where)
{
    // check the size of the block
    // printf ("check %p\n", p) ;
    ASSERT (k >= 3 && k < 64) ;
    ASSERT (p != NULL) ;
    size_t size = GB_Global_memtable_size (p) ;
    ASSERT (size == ((size_t) 1) << k) ;
}
#endif

// free_pool_get: get a block from the free_pool, or return NULL if none
// Pops the head of the kth pool under the critical section; the debug check
// runs outside the lock on the now-private block.

GB_PUBLIC
void *GB_Global_free_pool_get (int k)
{
    void *p = NULL ;
    ASSERT (k >= 3 && k < 64) ;
    #pragma omp critical(GB_free_pool)
    {
        p = GB_Global.free_pool [k] ;
        if (p != NULL)
        {
            // remove the block from the kth free_pool
            GB_Global.free_pool_nblocks [k]-- ;
            GB_Global.free_pool [k] = GB_NEXT (p) ;
        }
    }
    if (p != NULL)
    {
        // clear the next pointer inside the block, since the block needs
        // to be all zero
        // NOTE(review): the next pointer is not actually cleared here;
        // presumably the caller zeroes the block before use — confirm.
        // printf ("got %p k %d\n", p, k) ;
        #ifdef GB_DEBUG
        GB_Global_free_pool_check (p, k, "get") ;
        #endif
        // GB_Global_free_pool_dump (2) ; printf ("\ndid get\n\n") ;
    }
    return (p) ;
}

// free_pool_put: put a block in the free_pool, unless it is full
// Returns true if the block was accepted (pushed at the head of pool k);
// false if the pool is at its limit, in which case the caller still owns
// the block and must free it itself.

GB_PUBLIC
bool GB_Global_free_pool_put (void *p, int k)
{
    #ifdef GB_DEBUG
    GB_Global_free_pool_check (p, k, "put") ;
    #endif
    bool returned_to_pool = false ;
    #pragma omp critical(GB_free_pool)
    {
        returned_to_pool =
            (GB_Global.free_pool_nblocks [k] < GB_Global.free_pool_limit [k]) ;
        if (returned_to_pool)
        {
            // add the block to the head of the free_pool list
            // printf ("put %p k %d\n", p, k) ;
            GB_Global.free_pool_nblocks [k]++ ;
            GB_NEXT (p) = GB_Global.free_pool [k] ;
            GB_Global.free_pool [k] = p ;
        }
    }
    // GB_Global_free_pool_dump (2) ; printf ("\ndid put\n\n") ;
    return (returned_to_pool) ;
}

// free_pool_dump: check the validity of the free_pool
// Debug-only: walks every pool, optionally printing its contents (pr > 0
// prints summaries, pr > 1 prints each block), and asserts that each block
// has the right size for its pool and that the recorded block counts match
// the actual list lengths.
// NOTE(review): printf uses %ld for int64_t/size_t; on LLP64 platforms
// PRId64/%zu would be the portable spellings — confirm target platforms.

GB_PUBLIC
void GB_Global_free_pool_dump (int pr)
{
    #ifdef GB_DEBUG
    bool fail = false ;
    #pragma omp critical(GB_free_pool)
    {
        for (int k = 0 ; k < 64 && !fail ; k++)
        {
            int64_t nblocks = GB_Global.free_pool_nblocks [k] ;
            int64_t limit = GB_Global.free_pool_limit [k] ;
            if (nblocks != 0 && pr > 0)
            {
                printf ("pool %2d: %8ld blocks, %8ld limit\n",
                    k, nblocks, limit) ;
            }
            int64_t nblocks_actual = 0 ;
            void *p = GB_Global.free_pool [k] ;
            for ( ; p != NULL && !fail ; p = GB_NEXT (p))
            {
                if (pr > 1) printf (" %16p ", p) ;
                size_t size = GB_Global_memtable_size (p) ;
                if (pr > 1) printf ("size: %ld\n", size) ;
                nblocks_actual++ ;
                fail = fail || (size != ((size_t) 1) << k) ;
                if (fail && pr > 0) printf (" fail\n") ;
                // guard against cycles / counts drifting past the record
                fail = fail || (nblocks_actual > nblocks) ;
            }
            if (nblocks_actual != nblocks)
            {
                if (pr > 0) printf ("fail: # blocks %ld %ld\n",
                    nblocks_actual, nblocks) ;
                fail = true ;
            }
        }
    }
    ASSERT (!fail) ;
    #endif
}

// free_pool_limit_get: get the limit on the # of blocks in the kth pool
// Returns 0 for out-of-range k (pools < 3 or >= 64 do not exist).

GB_PUBLIC
int64_t GB_Global_free_pool_limit_get (int k)
{
    int64_t nblocks = 0 ;
    if (k >= 3 && k < 64)
    {
        #pragma omp critical(GB_free_pool)
        {
            nblocks = GB_Global.free_pool_limit [k] ;
        }
    }
    return (nblocks) ;
}

// free_pool_limit_set: set the limit on the # of blocks in the kth pool
// Silently ignores out-of-range k.

GB_PUBLIC
void GB_Global_free_pool_limit_set (int k, int64_t nblocks)
{
    if (k >= 3 && k < 64)
    {
        #pragma omp critical(GB_free_pool)
        {
            GB_Global.free_pool_limit [k] = nblocks ;
        }
    }
}

// free_pool_nblocks_total: total # of blocks in free_pool (for debug only)
// Sums the recorded per-pool counts under the lock; does not walk the lists.

GB_PUBLIC
int64_t GB_Global_free_pool_nblocks_total (void)
{
    int64_t nblocks = 0 ;
    #pragma omp critical(GB_free_pool)
    {
        for (int k = 0 ; k < 64 ; k++)
        {
            nblocks += GB_Global.free_pool_nblocks [k] ;
        }
    }
    return (nblocks) ;
}
net_sha1_fmt_plug.c
/* Cracker for "Keyed SHA1" network authentication hashes.
 *
 * This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Added linkage to dynamic (type dynamic_40) for any salt 230 bytes or less,
 * by Jim Fougeron. Any salts > 239 bytes will still be handled by this full
 * format. dynamic is limited to 256 bytes, which 'should' get us 240 bytes
 * of salt. I think we might be able to get 239 bytes (due to a few issues).
 * 240 byte salts fail. So, for peace of mind, I am limiting to 230 byte salts
 * within dynamic.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_netsha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_netsha1);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2048 // XXX
#endif

#include "arch.h"
#include "formats.h"
#include "dynamic.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL "net-sha1"
#define FORMAT_NAME "\"Keyed SHA1\" BFD"
#define FORMAT_TAG "$netsha1$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 20 // get this right ;)
#define BINARY_SIZE 20
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_WORD
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define HEXCHARS "0123456789abcdef"
#define MAX_SALT_LEN 1024

static struct fmt_tests tests[] = {
	/* Real hashes from Cisco routers ;) */
	{"$netsha1$20440a340000000100000000000f4240000f424000000000051c010000000001$709d3307304d790f58bf0a3cefd783b438408996", "password12345"},
	{"$netsha1$20440a340000000100000000000f4240000f424000000000051c010000000002$94bce4d9084199508669b39f044064082a093de3", "password12345"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

static void get_ptr();
static void init(struct fmt_main *self);

// Marker stored in custom_salt.magic; when a salt does NOT carry this value
// it is a salt produced by (and delegated to) the thin dynamic_40 format.
#define MAGIC 0xfe5aa5ef
static struct custom_salt {
	ARCH_WORD_32 magic;
	int length;
	unsigned char salt[MAX_SALT_LEN]; // fixed size, but should be OK
} *cur_salt;

static int dyna_salt_seen=0;
static char Conv_Buf[300]; // max salt length we will pass to dyna is 230. 300 is MORE than enough.
static struct fmt_main *pDynamicFmt, *pNetSha1_Dyna;

/* this function converts a 'native' net-sha1 signature string into a $dynamic_40$ syntax string */
// Input layout is "$netsha1$<salthex>$<hash>"; output is
// "$dynamic_40$<hash>$HEX$<salthex>" (the printed span starting at cp
// deliberately includes cp's '$', which supplies the '$' of "$HEX$").
// Returns "*" for malformed input, or the input itself if already dynamic.
static char *Convert(char *Buf, char *ciphertext)
{
	char *cp, *cp2;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
		return ciphertext;

	cp = strchr(&ciphertext[2], '$');
	if (!cp)
		return "*";
	cp2 = strchr(&cp[1], '$');
	if (!cp2)
		return "*";
	snprintf(Buf, sizeof(Conv_Buf), "$dynamic_40$%s$HEX%*.*s", &cp2[1], (int)(cp2-cp), (int)(cp2-cp), cp);
	return Buf;
}

// Validate a ciphertext for this format.  Hex salt + 40-hex-digit SHA-1
// binary are accepted here; anything else (e.g. already-converted
// $dynamic_40$ strings) is delegated to the thin dynamic format's valid().
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = strrchr(ciphertext, '$');
	if (!q)
		return 0;
	q = q + 1;
	if ((q - p - 1) > MAX_SALT_LEN * 2)
		return 0;
	len = strspn(q, HEXCHARS);
	// NOTE(review): len (int) vs strlen (size_t) comparison relies on the
	// hash tail being short; fine for 40-digit hashes.
	if (len != BINARY_SIZE * 2 || len != strlen(q)) {
		get_ptr();
		return pDynamicFmt->methods.valid(ciphertext, pDynamicFmt);
	}
	if (strspn(p, HEXCHARS) != q - p - 1)
		return 0;
	return 1;
}

// Decode the hex salt.  Salts shorter than 230 bytes are converted and
// handed to the dynamic_40 format (its salt blob is copied into our static
// buffer); longer salts are handled natively and tagged with MAGIC.
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *orig_ct = ciphertext;
	int i, len;

	memset(&cs, 0, sizeof(cs));
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	len = (strrchr(ciphertext, '$') - ciphertext) / 2;
	for (i = 0; i < len; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
	if (len < 230) {
		// return our memset buffer (putting the dyna salt pointer into it).
		// This keeps the 'pre-cleaned' salt() warning from hitting this format.
		//return pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct));
		memcpy((char*)(&cs), pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct)), pDynamicFmt->params.salt_size);
		dyna_salt_seen=1;
		return &cs;
	}
	cs.magic = MAGIC;
	cs.length = len;
	return &cs;
}

// Decode the binary hash.  Dynamic-format ciphertexts get the dynamic
// binary (16 bytes, zero-padded to BINARY_SIZE); native ones decode the
// trailing 40 hex digits.
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext)) {
		unsigned char *cp = pDynamicFmt->methods.binary(ciphertext);
		memset(out, 0, sizeof(buf.c));
		memcpy(out, cp, pDynamicFmt->params.binary_size); // binary size is 16
		return out;
	}
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

// get_hash_0..6: return the low 4..27 bits of the computed hash; each
// delegates to the dynamic format when the current salt is a dynamic salt.
static int get_hash_0(int index)
{
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.get_hash[0](index);
	return crypt_out[index][0] & 0xf;
}

static int get_hash_1(int index)
{
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.get_hash[1](index);
	return crypt_out[index][0] & 0xff;
}

static int get_hash_2(int index)
{
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.get_hash[2](index);
	return crypt_out[index][0] & 0xfff;
}

static int get_hash_3(int index)
{
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.get_hash[3](index);
	return crypt_out[index][0] & 0xffff;
}

static int get_hash_4(int index)
{
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.get_hash[4](index);
	return crypt_out[index][0] & 0xfffff;
}

static int get_hash_5(int index)
{
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.get_hash[5](index);
	return crypt_out[index][0] & 0xffffff;
}

static int get_hash_6(int index)
{
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.get_hash[6](index);
	return crypt_out[index][0] & 0x7ffffff;
}

// Install the current salt; dynamic salts are also forwarded to the
// dynamic format so its own crypt/cmp path sees them.
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	get_ptr();
	if (cur_salt->magic != MAGIC) {
		pDynamicFmt->methods.set_salt(salt);
	}
}

// Hash all queued keys: SHA1(salt || key padded with NULs to 20 bytes).
// Dynamic salts delegate entirely to the dynamic format.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

	if (cur_salt->magic != MAGIC) {
		return pDynamicFmt->methods.crypt_all(pcount, salt);
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		SHA_CTX ctx;
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->salt, cur_salt->length);
		// full PLAINTEXT_LENGTH bytes: the NUL padding from set_key is
		// part of the hashed message by design
		SHA1_Update(&ctx, saved_key[index], PLAINTEXT_LENGTH);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

// Quick screen: compare only the first 32 bits of each computed hash.
static int cmp_all(void *binary, int count)
{
	int index = 0;
	if (cur_salt->magic != MAGIC) {
		return pDynamicFmt->methods.cmp_all(binary, count);
	}
	for (; index < count; index++)
		if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

// Full 20-byte comparison for a single candidate.
static int cmp_one(void *binary, int index)
{
	if (cur_salt->magic != MAGIC) {
		return pDynamicFmt->methods.cmp_one(binary, index);
	}
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

// cmp_one already compares the full binary, so nothing more to verify.
static int cmp_exact(char *source, int index)
{
	return 1;
}

// Store a candidate key (NUL-padded to the fixed length), mirroring it to
// the dynamic format once any dynamic salt has been seen.
static void netsha1_set_key(char *key, int index)
{
	if (dyna_salt_seen)
		pDynamicFmt->methods.set_key(key, index);
	/* strncpy will pad with zeros, which is needed */
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

// Prepend FORMAT_TAG to bare (untagged) but otherwise valid hashes.
static char *prepare(char *fields[10], struct fmt_main *self)
{
	static char buf[sizeof(cur_salt->salt)*2+TAG_LENGTH+1];
	char *hash = fields[1];

	if (strncmp(hash, FORMAT_TAG, TAG_LENGTH) && valid(hash, self)) {
		get_ptr();
		if (text_in_dynamic_format_already(pDynamicFmt, hash))
			return hash;
		sprintf(buf, "%s%s", FORMAT_TAG, hash);
		return buf;
	}
	return hash;
}

// Format descriptor registered with John the Ripper.
struct fmt_main fmt_netsha1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		netsha1_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

// Lazily create the thin dynamic_40 link (once), adopt its key counts and
// extend the advertised algorithm name.
static void get_ptr() {
	if (!pDynamicFmt) {
		char *Buf;
		pNetSha1_Dyna = mem_alloc_tiny(sizeof(fmt_netsha1), 16);
		memcpy(pNetSha1_Dyna, &fmt_netsha1, sizeof(fmt_netsha1));

		pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetSha1_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-sha1", 0);
		fmt_netsha1.params.min_keys_per_crypt = pDynamicFmt->params.min_keys_per_crypt;
		fmt_netsha1.params.max_keys_per_crypt = pDynamicFmt->params.max_keys_per_crypt;
		Buf = mem_alloc_tiny(strlen(fmt_netsha1.params.algorithm_name) + 4 + strlen("dynamic_40") + 1, 1);
		sprintf(Buf, "%s or %s", fmt_netsha1.params.algorithm_name, "dynamic_40");
		fmt_netsha1.params.algorithm_name = Buf;
		//pDynamicFmt->methods.init(pDynamicFmt);
	}
}

// Format initialization: link the dynamic format, then size the key and
// hash buffers from the (possibly dynamic-adjusted) max key count.
static void init(struct fmt_main *self)
{
	// We have to allocate our dyna_40 object first, because we get 'modified' min/max counts from there.
	get_ptr();
	if (self->private.initialized == 0) {
		pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetSha1_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-sha1", 1);
		self->private.initialized = 1;
	}
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

#endif /* plugin stanza */
explicit_dt.h
/* ============================================================================== KratosPFEMApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: anonymous $ // Date: $Date: 2008-11-19 15:38:01 $ // Revision: $Revision: 1.1 $ // // #if !defined(KRATOS_EXPLICIT_DT_INCLUDED) #define KRATOS_EXPLICIT_DT_INCLUDED #include <string> #include <iostream> #include <algorithm> #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "utilities/geometry_utilities.h" #include "geometries/triangle_2d_3.h" #include "utilities/openmp_utils.h" //#include "kratos/applications/MeshingApplication/meshing_application.h" namespace Kratos { class ExplicitDtProcess : public Process { public: ExplicitDtProcess(double CFL,double min_dt,double max_dt, ModelPart& ThisModelPart ) :Process(), cfl(CFL),Min_dt(min_dt),Max_dt(max_dt), mr_model_part(ThisModelPart) { } /// Destructor. virtual ~ExplicitDtProcess() { } ///@} ///@name Operators ///@{ void operator()() { Execute(); } virtual void Execute() { KRATOS_TRY int NumThreads = OpenMPUtils::GetNumThreads(); std::vector< double > Threads_dt(NumThreads,10.0); ModelPart::ElementsContainerType::iterator elem_bg = mr_model_part.ElementsBegin(); int n_elems = mr_model_part.Elements().size(); #pragma omp parallel for firstprivate(n_elems, elem_bg) for(int ii=0; ii<n_elems; ++ii) { //calculate min_dt ModelPart::ElementsContainerType::iterator elem = elem_bg + ii; double calc_dt = 1.0; elem->Calculate(DELTA_TIME, calc_dt, mr_model_part.GetProcessInfo()); int k = OpenMPUtils::ThisThread(); if(calc_dt < Threads_dt[k]) Threads_dt[k] = calc_dt; } #pragma omp barrier //KRATOS_WATCH(omp_get_thread_num()); KRATOS_WATCH(NumThreads); double DT = Max_dt; for(int kk=0; kk<NumThreads; ++kk) if( Threads_dt[kk] < DT) DT = Threads_dt[kk]; if(DT < Min_dt) DT = Min_dt; // double DT = 0.00000001; DT*=cfl; mr_model_part.GetProcessInfo()[DELTA_TIME] = DT; KRATOS_WATCH("ExplicitDeltaT"); KRATOS_WATCH(DT); // return DT; 
KRATOS_WATCH("++++++++++++++++++++END OF ExplicitDtProcess PROCESS ^^^^^^^^^^^^^^^^^^^^^^"); KRATOS_CATCH("") } private: double cfl,Min_dt,Max_dt; ModelPart& mr_model_part; }; }//namespace kratos #endif
cgbtrf.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgbtrf.c, normal z -> c, Fri Sep 28 17:38:04 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_gbtrf
 *
 * Computes an LU factorization of a real m-by-n band matrix A
 * using partial pivoting with row interchanges.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0.
 *
 * @param[in] kl
 *          The number of subdiagonals within the band of A. kl >= 0.
 *
 * @param[in] ku
 *          The number of superdiagonals within the band of A. ku >= 0.
 *
 * @param[in,out] AB
 *          On entry, the band matrix A in LAPACK band storage.
 *          On exit, details of the LU factorization of the band matrix A,
 *          as computed by plasma_cgbtrf.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB. ldab >= 1+kl+ku.
 *
 * @param[out] ipiv
 *          The pivot indices; for 1 <= i <= min(m,n), row i of the
 *          matrix was interchanged with row ipiv(i).
 *
 * @retval PlasmaSuccess on successful exit; a negative value -k if the
 *          k-th argument is illegal; another error code on internal failure.
 *
 ******************************************************************************/
int plasma_cgbtrf(int m, int n, int kl, int ku,
                  plasma_complex32_t *pAB, int ldab, int *ipiv)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kl < 0) {
        plasma_error("illegal value of kl");
        return -3;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -4;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -6;
    }

    // quick return
    // NOTE(review): no early-out for min(m,n) == 0 is implemented here;
    // empty problems fall through the full code path — confirm intent.

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gbtrf(plasma, PlasmaComplexFloat, n, kl+ku+1);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrix.
    plasma_desc_t AB;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm = (tku+tkl+1)*nb;   // since we use cgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexFloat, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, m, n, kl, ku,
                                             &AB);
    if (retval != PlasmaSuccess) {
        // error message now names the function that actually failed
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }

    // Initialize sequence (check the result instead of discarding it).
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize request (check the result instead of discarding it).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cpb2desc(pAB, ldab, AB, &sequence, &request);
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Call the tile async function.
        plasma_omp_cgbtrf(AB, ipiv, &sequence, &request);
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Translate back to LAPACK layout.
        plasma_omp_cdesc2pb(AB, pAB, ldab, &sequence, &request);
    }

    // Free matrix A in tile layout.
    plasma_desc_destroy(&AB);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * Computes an LU factorization of a real m-by-n band matrix A
 * using partial pivoting with row interchanges.
 * Non-blocking tile version of plasma_cgbsv().
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in,out] AB
 *          Descriptor of matrix A.
 *
 * @param[out] ipiv
 *          The pivot indices; for 1 <= i <= min(m,n), row i of the
 *          matrix was interchanged with row ipiv(i).
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 ******************************************************************************/
void plasma_omp_cgbtrf(plasma_desc_t AB, int *ipiv,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        // cannot record the failure if sequence is also invalid; checked below
        if (sequence != NULL)
            plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check sequence and request BEFORE using them: the previous code passed
    // a NULL sequence into plasma_request_fail() inside its own NULL check,
    // which dereferences it.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid AB");
        return;
    }

    // Call the parallel function.
    plasma_pcgbtrf(AB, ipiv, sequence, request);
}
convect_particles_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pablo Becker // // #if !defined(KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED ) #define KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED #define PRESSURE_ON_EULERIAN_MESH #define USE_FEW_PARTICLES // System includes #include <string> #include <iostream> #include <algorithm> // External includes #ifdef _OPENMP #include "omp.h" #endif // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "includes/variables.h" #include "spatial_containers/spatial_containers.h" #include "utilities/timer.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/timer.h" namespace Kratos { template<std::size_t TDim> class ParticleConvectUtily { public: KRATOS_CLASS_POINTER_DEFINITION(ParticleConvectUtily<TDim>); ParticleConvectUtily(typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure) : mpSearchStructure(pSearchStructure) { } ~ParticleConvectUtily() { } //********************************************************************************************** //********************************************************************************************** ///this function moves all the nodes contained in rModelPart from their position at time tn to the one at time ///tn+1 by following the trajectories. 
This is done by performing "subdivions" forward euler steps within each time step ///@param rModelPart the model part on which we work ///@param subdivisions number of forward euler substeps used in advancing in time void MoveParticles_Substepping(ModelPart& rModelPart, unsigned int subdivisions) { KRATOS_TRY const double dt = rModelPart.GetProcessInfo()[DELTA_TIME]; const double small_dt = dt/ static_cast<double>(subdivisions); //do movement array_1d<double, 3 > veulerian; array_1d<double, 3 > acc_particle; Vector N(TDim + 1); const int max_results = rModelPart.Nodes().size(); typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,veulerian,acc_particle) for (int i = 0; i < nparticles; i++) { unsigned int substep = 0; ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); array_1d<double,3> current_position = iparticle->GetInitialPosition() + iparticle->FastGetSolutionStepValue(DISPLACEMENT,1); Element::Pointer pelement; bool is_found = false; array_1d<double, 3> aux_point_local_coordinates; while(substep++ < subdivisions) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); is_found = false; if(substep > 1 ) //first check if it falls within the same element { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); is_found = geom.IsInside(current_position, aux_point_local_coordinates, 1.0e-5); geom.ShapeFunctionsValues(N, aux_point_local_coordinates); if(is_found == false) is_found = mpSearchStructure->FindPointOnMesh(current_position, N, pelement, result_begin, max_results); } else //if not found use the search structure { is_found = mpSearchStructure->FindPointOnMesh(current_position, N, pelement, result_begin, max_results); } (iparticle)->Set(TO_ERASE, true); if (is_found == true) { Geometry< Node < 3 > >& geom = 
pelement->GetGeometry(); const double new_step_factor = static_cast<double>(substep)/subdivisions; const double old_step_factor = 1.0 - new_step_factor; noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) ); noalias(current_position) += small_dt*veulerian; (iparticle)->Set(TO_ERASE, false); } else break; } if (is_found == true) { iparticle->FastGetSolutionStepValue(DISPLACEMENT) = current_position - iparticle->GetInitialPosition(); noalias(pparticle->Coordinates()) = current_position; } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** ///this function moves the mesh as xn+1 = xn + vn*dt and sets the mesh velocity to vn ///@param rModelPart the model part on which we work void MoveParticles_RK4(ModelPart& rModelPart) { KRATOS_TRY const double dt = rModelPart.GetProcessInfo()[DELTA_TIME]; //do movement array_1d<double, 3 > v1,v2,v3,v4,vtot,x; Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,v1,v2,v3,v4,vtot,x) for (int i = 0; i < nparticles; i++) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); array_1d<double,3> initial_position = iparticle->GetInitialPosition() + iparticle->FastGetSolutionStepValue(DISPLACEMENT,1); Element::Pointer pelement; bool 
is_found = false; //STEP1 { is_found = mpSearchStructure->FindPointOnMesh(initial_position, N, pelement, result_begin, max_results); if( is_found == false) goto end_of_particle; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(v1) = N[0] * ( geom[0].FastGetSolutionStepValue(VELOCITY,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(v1) += N[k] * ( geom[k].FastGetSolutionStepValue(VELOCITY,1) ); } //STEP2 // if(is_found == true) { noalias(x) = initial_position + (0.5*dt)*v1; is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, result_begin, max_results); if( is_found == false) goto end_of_particle; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); const double new_step_factor = 0.5; const double old_step_factor = 0.5; noalias(v2) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(v2) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) ); } //STEP3 // if(is_found == true) { const array_1d<double,3> x = initial_position + (0.5*dt)*v2; is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, result_begin, max_results); if( is_found == false) goto end_of_particle; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); const double new_step_factor = 0.5; //as the step before const double old_step_factor = 0.5; noalias(v3) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(v3) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) ); } //STEP4 // if(is_found == true) { const array_1d<double,3> x = initial_position + (dt)*v3; is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, 
result_begin, max_results); if( is_found == false) goto end_of_particle; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(v4) = N[0] * ( geom[0].FastGetSolutionStepValue(VELOCITY)); for (unsigned int k = 1; k < geom.size(); k++) noalias(v4) += N[k] * ( geom[k].FastGetSolutionStepValue(VELOCITY) ); } (iparticle)->Set(TO_ERASE, false); //finalize step noalias(x) = initial_position; noalias(x) += 0.16666666666666666666667*dt*v1; noalias(x) += 0.33333333333333333333333*dt*v2; noalias(x) += 0.33333333333333333333333*dt*v3; noalias(x) += 0.16666666666666666666667*dt*v4; iparticle->FastGetSolutionStepValue(DISPLACEMENT) = x - iparticle->GetInitialPosition(); noalias(pparticle->Coordinates()) = x; end_of_particle: (iparticle)->Set(TO_ERASE, true); } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** ///this function erases the elements and conditions which have at least one node marked for erase ///@param rModelPart the model part on which we work void EraseOuterElements(ModelPart& rModelPart) { KRATOS_TRY int nerased_el = 0; for(ModelPart::ElementsContainerType::iterator it = rModelPart.ElementsBegin(); it!=rModelPart.ElementsEnd(); it++) { Geometry< Node<3> >& geom = it->GetGeometry(); // bool erase_el = false; for(unsigned int i=0; i<geom.size(); i++) { if(geom[i].Is(TO_ERASE)) { it->Set(TO_ERASE,true); nerased_el++; break; } } } if(nerased_el > 0) { ModelPart::ElementsContainerType temp_elems_container; temp_elems_container.reserve(rModelPart.Elements().size() - nerased_el); temp_elems_container.swap(rModelPart.Elements()); for(ModelPart::ElementsContainerType::iterator it = temp_elems_container.begin() ; it != temp_elems_container.end() ; it++) { if( it->IsNot(TO_ERASE) ) (rModelPart.Elements()).push_back(*(it.base())); } } KRATOS_CATCH("") } private: typename 
BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure; }; } // namespace Kratos. #endif // KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED defined
set_grid_props_and_dt.c
/*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/

/*
This file contains functions for reading the grid properties as well as setting the time step.
*/

#include <stdlib.h>
#include <stdio.h>
#include <netcdf.h>
#include <geos95.h>
#include <atmostracers.h>
#include "../game_types.h"
#include "../game_constants.h"
#include "../spatial_operators/spatial_operators.h"

#define ERRCODE 2
/* Any netcdf error aborts the run with the library's message. */
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}

int set_grid_properties(Grid *grid, Dualgrid *dualgrid, char GEO_PROP_FILE[])
{
	/*
	This function reads all the grid properties from the grid netcdf file
	(GEO_PROP_FILE) into the Grid and Dualgrid structs, then derives a few
	fields (slopes, gravity / Exner-pressure gradients, soil grid).
	Returns 0 on success; any netcdf failure exits via ERR().
	NOTE(review): the destination arrays are assumed to be pre-allocated
	with sizes matching the netcdf variables -- confirm against the callers
	and the grid-file generator.
	*/
	int ncid, retval;
	/* netcdf variable ids, one per field in the grid file */
	int normal_distance_id, volume_id, area_id, z_scalar_id, z_vector_id, trsk_weights_id, area_dual_id, z_vector_dual_id, f_vec_id, to_index_id, from_index_id, to_index_dual_id, from_index_dual_id, adjacent_vector_indices_h_id, trsk_indices_id, trsk_modified_curl_indices_id, adjacent_signs_h_id, direction_id, gravity_potential_id, inner_product_weights_id, density_to_rhombi_weights_id, density_to_rhombi_indices_id, normal_distance_dual_id, vorticity_indices_triangles_id, vorticity_signs_triangles_id, latitude_scalar_id, longitude_scalar_id, no_of_shaded_points_scalar_id, no_of_shaded_points_vector_id, toa_id, vert_grid_type_id, interpol_indices_id, interpol_weights_id, theta_bg_id, exner_bg_id, sfc_rho_c_id, sfc_albedo_id, roughness_length_id, is_land_id, t_conductivity_id, no_of_oro_layers_id, stretching_parameter_id;
	if ((retval = nc_open(GEO_PROP_FILE, NC_NOWRITE, &ncid)))
		ERR(retval);
	/* --- look up all variable ids by name ------------------------------- */
	if ((retval = nc_inq_varid(ncid, "vert_grid_type", &vert_grid_type_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "no_of_oro_layers", &no_of_oro_layers_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "toa", &toa_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "stretching_parameter", &stretching_parameter_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "normal_distance", &normal_distance_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "volume", &volume_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "area", &area_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "z_scalar", &z_scalar_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "theta_bg", &theta_bg_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "exner_bg", &exner_bg_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "gravity_potential", &gravity_potential_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "z_vector", &z_vector_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "trsk_weights", &trsk_weights_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "area_dual", &area_dual_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "z_vector_dual", &z_vector_dual_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "f_vec", &f_vec_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "to_index", &to_index_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "to_index_dual", &to_index_dual_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "direction", &direction_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "normal_distance_dual", &normal_distance_dual_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "from_index", &from_index_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "from_index_dual", &from_index_dual_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "adjacent_vector_indices_h", &adjacent_vector_indices_h_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "vorticity_indices_triangles", &vorticity_indices_triangles_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "vorticity_signs_triangles", &vorticity_signs_triangles_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "trsk_indices", &trsk_indices_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "trsk_modified_curl_indices", &trsk_modified_curl_indices_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "adjacent_signs_h", &adjacent_signs_h_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "inner_product_weights", &inner_product_weights_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "density_to_rhombi_weights", &density_to_rhombi_weights_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "density_to_rhombi_indices", &density_to_rhombi_indices_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "latitude_scalar", &latitude_scalar_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "longitude_scalar", &longitude_scalar_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "no_of_shaded_points_scalar", &no_of_shaded_points_scalar_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "no_of_shaded_points_vector", &no_of_shaded_points_vector_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "interpol_indices", &interpol_indices_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "interpol_weights", &interpol_weights_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "sfc_rho_c", &sfc_rho_c_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "sfc_albedo", &sfc_albedo_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "roughness_length", &roughness_length_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "t_conductivity", &t_conductivity_id)))
		ERR(retval);
	if ((retval = nc_inq_varid(ncid, "is_land", &is_land_id)))
		ERR(retval);
	/* --- read scalars --------------------------------------------------- */
	if ((retval = nc_get_var_int(ncid, vert_grid_type_id, &(grid -> vert_grid_type))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, no_of_oro_layers_id, &(grid -> no_of_oro_layers))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, toa_id, &(grid -> toa))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, stretching_parameter_id, &(grid -> stretching_parameter))))
		ERR(retval);
	/* --- read double arrays (primal grid, then dual grid) --------------- */
	if ((retval = nc_get_var_double(ncid, normal_distance_id, &(grid -> normal_distance[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, inner_product_weights_id, &(grid -> inner_product_weights[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, volume_id, &(grid -> volume[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, area_id, &(grid -> area[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, z_scalar_id, &(grid -> z_scalar[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, theta_bg_id, &(grid -> theta_bg[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, exner_bg_id, &(grid -> exner_bg[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, gravity_potential_id, &(grid -> gravity_potential[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, z_vector_id, &(grid -> z_vector[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, trsk_weights_id, &(grid -> trsk_weights[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, area_dual_id, &(dualgrid -> area[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, z_vector_dual_id, &(dualgrid -> z_vector[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, direction_id, &(grid -> direction[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, f_vec_id, &(dualgrid -> f_vec[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, density_to_rhombi_weights_id, &(grid -> density_to_rhombi_weights[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, normal_distance_dual_id, &(dualgrid -> normal_distance[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, latitude_scalar_id, &(grid -> latitude_scalar[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, longitude_scalar_id, &(grid -> longitude_scalar[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, interpol_weights_id, &(grid -> latlon_interpol_weights[0]))))
		ERR(retval);
	/* --- read integer arrays -------------------------------------------- */
	if ((retval = nc_get_var_int(ncid, from_index_id, &(grid -> from_index[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, to_index_id, &(grid -> to_index[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, from_index_dual_id, &(dualgrid -> from_index[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, to_index_dual_id, &(dualgrid -> to_index[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, adjacent_vector_indices_h_id, &(grid -> adjacent_vector_indices_h[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, vorticity_indices_triangles_id, &(dualgrid -> vorticity_indices_triangles[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, vorticity_signs_triangles_id, &(dualgrid -> vorticity_signs_triangles[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, trsk_indices_id, &(grid -> trsk_indices[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, trsk_modified_curl_indices_id, &(grid -> trsk_modified_curl_indices[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, adjacent_signs_h_id, &(grid -> adjacent_signs_h[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, density_to_rhombi_indices_id, &(grid -> density_to_rhombi_indices[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, no_of_shaded_points_scalar_id, &(grid -> no_of_shaded_points_scalar[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, no_of_shaded_points_vector_id, &(grid -> no_of_shaded_points_vector[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, interpol_indices_id, &(grid -> latlon_interpol_indices[0]))))
		ERR(retval);
	/* --- surface properties --------------------------------------------- */
	if ((retval = nc_get_var_double(ncid, sfc_rho_c_id, &(grid -> sfc_rho_c[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, sfc_albedo_id, &(grid -> sfc_albedo[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, roughness_length_id, &(grid -> roughness_length[0]))))
		ERR(retval);
	if ((retval = nc_get_var_double(ncid, t_conductivity_id, &(grid -> t_conduc_soil[0]))))
		ERR(retval);
	if ((retval = nc_get_var_int(ncid, is_land_id, &(grid -> is_land[0]))))
		ERR(retval);
	if ((retval = nc_close(ncid)))
		ERR(retval);
	/* Replace the -1 "no neighbour" markers by 0 so the value can be used
	directly as an array index later on. */
	#pragma omp parallel for
	for (int i = 0; i < 6*NO_OF_SCALARS_H; ++i)
	{
		if (grid -> adjacent_vector_indices_h[i] == -1)
		{
			grid -> adjacent_vector_indices_h[i] = 0;
		}
	}
	// determining coordinate slopes
	grad_hor_cov(grid -> z_scalar, grid -> slope, grid);
	// computing the gradient of the gravity potential
	grad(grid -> gravity_potential, grid -> gravity_m, grid);
	// computing the gradient of the background Exner pressure
	grad(grid -> exner_bg, grid -> exner_bg_grad, grid);
	// fundamental SFC properties
	grid -> z_t_const = -10.0;
	grid -> t_const_soil = T_0 + 15;
	/*
	constructing the soil grid
	--------------------------
	Interface depths grow geometrically with factor sigma_soil towards the
	surface, then the whole column is rescaled so that the deepest interface
	coincides with z_t_const.
	NOTE(review): pow() is used but <math.h> is not included directly here;
	presumably it comes in via geos95.h -- confirm.
	*/
	double sigma_soil = 0.8;
	// the surface is always at zero
	grid -> z_soil_interface[0] = 0;
	for (int i = 1; i < NO_OF_SOIL_LAYERS + 1; ++i)
	{
		grid -> z_soil_interface[i] = grid -> z_soil_interface[i - 1] + pow(sigma_soil, NO_OF_SOIL_LAYERS - i);
	}
	double rescale_factor = grid -> z_t_const/grid -> z_soil_interface[NO_OF_SOIL_LAYERS];
	for (int i = 1; i < NO_OF_SOIL_LAYERS + 1; ++i)
	{
		grid -> z_soil_interface[i] = rescale_factor*grid -> z_soil_interface[i];
	}
	// layer centers are the midpoints of the interfaces
	for (int i = 0; i < NO_OF_SOIL_LAYERS; ++i)
	{
		grid -> z_soil_center[i] = 0.5*(grid -> z_soil_interface[i] + grid -> z_soil_interface[i + 1]);
	}
	return 0;
}
cg.c
/*--------------------------------------------------------------------

NAS Parallel Benchmarks 2.3 OpenMP C versions - CG

This benchmark is an OpenMP C version of the NPB CG code.

The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.

Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.

Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp

Information on OpenMP activities at RWCP is available at:

http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

Information on NAS Parallel Benchmarks 2.3 is available at:

http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

Authors: M. Yarrow
         C. Kuszmaul

OpenMP C version: S. Satoh

--------------------------------------------------------------------*/

/*
c---------------------------------------------------------------------
c  Note: please observe that in the routine conj_grad three
c  implementations of the sparse matrix-vector multiply have
c  been supplied.  The default matrix-vector multiply is not
c  loop unrolled.  The alternate implementations are unrolled
c  to a depth of 2 and unrolled to a depth of 8.  Please
c  experiment with these to find the fastest for your particular
c  architecture.  If reporting timing results, any of these three may
c  be used without penalty.
c---------------------------------------------------------------------
*/

#include "npb-C.h"
#include "npbparams.h"

/* number of nonzeros, derived from the problem-class parameters */
#define NZ	NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)

/* global variables */

/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;

/* common /main_int_mem/
   All arrays use 1-based indexing (Fortran heritage), hence the +1. */
static int colidx[NZ+1];	/* colidx[1:NZ] */
static int rowstr[NA+1+1];	/* rowstr[1:NA+1] */
static int iv[2*NA+1+1];	/* iv[1:2*NA+1] */
static int arow[NZ+1];		/* arow[1:NZ] */
static int acol[NZ+1];		/* acol[1:NZ] */

/* common /main_flt_mem/ */
static double v[NA+1+1];	/* v[1:NA+1] */
static double aelt[NZ+1];	/* aelt[1:NZ] */
static double a[NZ+1];		/* a[1:NZ] */
static double x[NA+2+1];	/* x[1:NA+2] */
static double z[NA+2+1];	/* z[1:NA+2] */
static double p[NA+2+1];	/* p[1:NA+2] */
static double q[NA+2+1];	/* q[1:NA+2] */
static double r[NA+2+1];	/* r[1:NA+2] */
static double w[NA+2+1];	/* w[1:NA+2] */

/* common /urando/ */
static double amult;
static double tran;

/* function declarations */
static void conj_grad (int colidx[], int rowstr[], double x[], double z[],
		       double a[], double p[], double q[], double r[],
		       double w[], double *rnorm);
static void makea(int n, int nz, double a[], int colidx[], int rowstr[],
		  int nonzer, int firstrow, int lastrow, int firstcol,
		  int lastcol, double rcond, int arow[], int acol[],
		  double aelt[], double v[], int iv[], double shift );
static void sparse(double a[], int colidx[], int rowstr[], int n,
		   int arow[], int acol[], double aelt[],
		   int firstrow, int lastrow,
		   double x[], boolean mark[], int nzloc[], int nnza);
static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[],
		   int mark[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);

/*--------------------------------------------------------------------
      program cg
--------------------------------------------------------------------*/
int main(int argc, char **argv)
{
    int	i, j, k, it;
    int nthreads = 1;
    double zeta;
    double rnorm;
    double norm_temp11;
    double norm_temp12;
    double t, mflops;
    char cclass;
    boolean verified;
    double zeta_verify_value, epsilon;

    firstrow = 1;
    lastrow  = NA;
    firstcol = 1;
    lastcol  = NA;

    /* Map the compile-time problem parameters to the NPB class letter and
       its reference eigenvalue estimate for verification. */
    if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
	cclass = 'S';
	zeta_verify_value = 8.5971775078648;
    } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
	cclass = 'W';
	zeta_verify_value = 10.362595087124;
    } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
	cclass = 'A';
	zeta_verify_value = 17.130235054029;
    } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
	cclass = 'B';
	zeta_verify_value = 22.712745482631;
    } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
	cclass = 'C';
	zeta_verify_value = 28.973605592845;
    } else {
	cclass = 'U';
    }

    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
	   " - CG Benchmark\n");
    printf(" Size: %10d\n", NA);
    printf(" Iterations: %5d\n", NITER);

    naa = NA;
    nzz = NZ;

/*--------------------------------------------------------------------
c  Initialize random number generator
c-------------------------------------------------------------------*/
    tran    = 314159265.0;
    amult   = 1220703125.0;
    zeta    = randlc( &tran, amult );

/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
    makea(naa, nzz, a, colidx, rowstr, NONZER,
	  firstrow, lastrow, firstcol, lastcol,
	  RCOND, arow, acol, aelt, v, iv, SHIFT);

/*---------------------------------------------------------------------
c  Note: as a result of the above call to makea:
c        values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c        values of colidx which are col indexes go from firstcol --> lastcol
c        So:
c        Shift the col index vals from actual (firstcol --> lastcol )
c        to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
#pragma omp parallel private(it,i,j,k)
{
#pragma omp for nowait
    for (j = 1; j <= lastrow - firstrow + 1; j++) {
	for (k = rowstr[j]; k < rowstr[j+1]; k++) {
            colidx[k] = colidx[k] - firstcol + 1;
	}
    }

/*--------------------------------------------------------------------
c  set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
#pragma omp for nowait
    for (i = 1; i <= NA+1; i++) {
	x[i] = 1.0;
    }
#pragma omp single
    zeta  = 0.0;

/*-------------------------------------------------------------------
c---->
c  Do one iteration untimed to init all code and data page tables
c---->                    (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
    for (it = 1; it <= 1; it++) {

/*--------------------------------------------------------------------
c  The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
	conj_grad (colidx, rowstr, x, z, a, p, q, r, w, &rnorm);

/*--------------------------------------------------------------------
c  zeta = shift + 1/(x.z)
c  So, first: (x.z)
c  Also, find norm of z
c  So, first: (z.z)
c-------------------------------------------------------------------*/
#pragma omp single
{
	norm_temp11 = 0.0;
	norm_temp12 = 0.0;
} /* end single */

#pragma omp for reduction(+:norm_temp11,norm_temp12)
	for (j = 1; j <= lastcol-firstcol+1; j++) {
            norm_temp11 = norm_temp11 + x[j]*z[j];
            norm_temp12 = norm_temp12 + z[j]*z[j];
	}
#pragma omp single
	norm_temp12 = 1.0 / sqrt( norm_temp12 );

/*--------------------------------------------------------------------
c  Normalize z to obtain x
c-------------------------------------------------------------------*/
#pragma omp for
	for (j = 1; j <= lastcol-firstcol+1; j++) {
            x[j] = norm_temp12*z[j];
	}

    } /* end of do one iteration untimed */

/*--------------------------------------------------------------------
c  set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
#pragma omp for nowait
    for (i = 1; i <= NA+1; i++) {
	x[i] = 1.0;
    }
#pragma omp single
    zeta  = 0.0;

} /* end parallel */

    timer_clear( 1 );
    timer_start( 1 );

/*--------------------------------------------------------------------
c---->
c  Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
#pragma omp parallel private(it,i,j,k)
{
    for (it = 1; it <= NITER; it++) {

/*--------------------------------------------------------------------
c  The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
	conj_grad(colidx, rowstr, x, z, a, p, q, r, w, &rnorm);

/*--------------------------------------------------------------------
c  zeta = shift + 1/(x.z)
c  So, first: (x.z)
c  Also, find norm of z
c  So, first: (z.z)
c-------------------------------------------------------------------*/
#pragma omp single
{
	norm_temp11 = 0.0;
	norm_temp12 = 0.0;
} /* end single */

#pragma omp for reduction(+:norm_temp11,norm_temp12)
	for (j = 1; j <= lastcol-firstcol+1; j++) {
            norm_temp11 = norm_temp11 + x[j]*z[j];
            norm_temp12 = norm_temp12 + z[j]*z[j];
	}

#pragma omp single
{
	norm_temp12 = 1.0 / sqrt( norm_temp12 );
	zeta = SHIFT + 1.0 / norm_temp11;
} /* end single */

#pragma omp master
{
	if( it == 1 ) {
	    printf(" iteration ||r|| zeta\n");
	}
	printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
} /* end master */

/*--------------------------------------------------------------------
c  Normalize z to obtain x
c-------------------------------------------------------------------*/
#pragma omp for
	for (j = 1; j <= lastcol-firstcol+1; j++) {
            x[j] = norm_temp12*z[j];
	}
    } /* end of main iter inv pow meth */

#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */

    timer_stop( 1 );

/*--------------------------------------------------------------------
c  End of timed section
c-------------------------------------------------------------------*/

    t = timer_read( 1 );

    printf(" Benchmark completed\n");

    epsilon = 1.0e-10;
    if (cclass != 'U') {
	if (fabs(zeta - zeta_verify_value) <= epsilon) {
            verified = TRUE;
	    printf(" VERIFICATION SUCCESSFUL\n");
	    printf(" Zeta is %20.12e\n", zeta);
	    printf(" Error is %20.12e\n", zeta - zeta_verify_value);
	} else {
            verified = FALSE;
	    printf(" VERIFICATION FAILED\n");
	    printf(" Zeta %20.12e\n", zeta);
	    printf(" The correct zeta is %20.12e\n", zeta_verify_value);
	}
    } else {
	verified = FALSE;
	printf(" Problem size unknown\n");
	printf(" NO VERIFICATION PERFORMED\n");
    }

    if ( t != 0.0 ) {
	mflops = (2.0*NITER*NA)
	    * (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
	    / t / 1000000.0;
    } else {
	mflops = 0.0;
    }

    c_print_results("CG", cclass, NA, 0, 0, NITER, nthreads, t,
		    mflops, " floating point",
		    verified, NPBVERSION, COMPILETIME,
		    CS1, CS2, CS3, CS4, CS5, CS6, CS7);
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void conj_grad (
    int colidx[],	/* colidx[1:nzz] */
    int rowstr[],	/* rowstr[1:naa+1] */
    double x[],		/* x[*] */
    double z[],		/* z[*] */
    double a[],		/* a[1:nzz] */
    double p[],		/* p[*] */
    double q[],		/* q[*] */
    double r[],		/* r[*] */
    double w[],		/* w[*] */
    double *rnorm )
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*---------------------------------------------------------------------
c  Floaging point arrays here are named as in NPB1 spec discussion of
c  CG algorithm
c---------------------------------------------------------------------*/
{
    /* static so the scalars are shared among the OpenMP threads that
       call this routine from inside a parallel region */
    static double d, sum, rho, rho0, alpha, beta;
    int i, j, k;
    int cgit, cgitmax = 25;

#pragma omp single nowait
    rho = 0.0;

/*--------------------------------------------------------------------
c  Initialize the CG algorithm:
c-------------------------------------------------------------------*/
#pragma omp for nowait
    for (j = 1; j <= naa+1; j++) {
	q[j] = 0.0;
	z[j] = 0.0;
	r[j] = x[j];
	p[j] = r[j];
	w[j] = 0.0;
    }

/*--------------------------------------------------------------------
c  rho = r.r
c  Now, obtain the norm of r: First, sum squares of r elements locally...
c  (x is used here because r was just set equal to x above)
c-------------------------------------------------------------------*/
#pragma omp for reduction(+:rho)
    for (j = 1; j <= lastcol-firstcol+1; j++) {
	rho = rho + x[j]*x[j];
    }

/*--------------------------------------------------------------------
c---->
c  The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
    for (cgit = 1; cgit <= cgitmax; cgit++) {
#pragma omp single nowait
{
	rho0 = rho;
	d = 0.0;
	rho = 0.0;
} /* end single */

/*--------------------------------------------------------------------
c  q = A.p
c  The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C  NOTE: this version of the multiply is actually (slightly: maybe %5)
C        faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C        below.   On the Cray t3d, the reverse is true, i.e., the
C        unrolled-by-two version is some 10% faster.
C        The unrolled-by-8 version below is significantly faster
C        on the Cray t3d - overall speed of code is 1.5 times faster.
*/

/* rolled version */
#pragma omp for private(sum,k)
	for (j = 1; j <= lastrow-firstrow+1; j++) {
            sum = 0.0;
	    for (k = rowstr[j]; k < rowstr[j+1]; k++) {
		sum = sum + a[k]*p[colidx[k]];
	    }
            w[j] = sum;
	}

/* unrolled-by-two version
#pragma omp for private(i,k)
	for (j = 1; j <= lastrow-firstrow+1; j++) {
	    int iresidue;
	    double sum1, sum2;
	    i = rowstr[j];
            iresidue = (rowstr[j+1]-i) % 2;
            sum1 = 0.0;
            sum2 = 0.0;
            if (iresidue == 1) sum1 = sum1 + a[i]*p[colidx[i]];
	    for (k = i+iresidue; k <= rowstr[j+1]-2; k += 2) {
		sum1 = sum1 + a[k]   * p[colidx[k]];
		sum2 = sum2 + a[k+1] * p[colidx[k+1]];
	    }
            w[j] = sum1 + sum2;
        }
*/
/* unrolled-by-8 version
#pragma omp for private(i,k,sum)
	for (j = 1; j <= lastrow-firstrow+1; j++) {
	    int iresidue;
            i = rowstr[j];
            iresidue = (rowstr[j+1]-i) % 8;
            sum = 0.0;
            for (k = i; k <= i+iresidue-1; k++) {
                sum = sum +  a[k] * p[colidx[k]];
            }
            for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) {
		sum = sum + a[k  ] * p[colidx[k  ]]
		          + a[k+1] * p[colidx[k+1]]
		          + a[k+2] * p[colidx[k+2]]
		          + a[k+3] * p[colidx[k+3]]
		          + a[k+4] * p[colidx[k+4]]
		          + a[k+5] * p[colidx[k+5]]
		          + a[k+6] * p[colidx[k+6]]
		          + a[k+7] * p[colidx[k+7]];
	    }
            w[j] = sum;
        }
*/

#pragma omp for
	for (j = 1; j <= lastcol-firstcol+1; j++) {
            q[j] = w[j];
	}

/*--------------------------------------------------------------------
c  Clear w for reuse...
c-------------------------------------------------------------------*/
#pragma omp for nowait
	for (j = 1; j <= lastcol-firstcol+1; j++) {
            w[j] = 0.0;
	}

/*--------------------------------------------------------------------
c  Obtain p.q
c-------------------------------------------------------------------*/
#pragma omp for reduction(+:d)
	for (j = 1; j <= lastcol-firstcol+1; j++) {
            d = d + p[j]*q[j];
	}

/*--------------------------------------------------------------------
c  Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
#pragma omp single
	alpha = rho0 / d;

/*--------------------------------------------------------------------
c  Save a temporary of rho
c-------------------------------------------------------------------*/
	/*	rho0 = rho;*/

/*---------------------------------------------------------------------
c  Obtain z = z + alpha*p
c  and    r = r - alpha*q
c---------------------------------------------------------------------*/
#pragma omp for
	for (j = 1; j <= lastcol-firstcol+1; j++) {
            z[j] = z[j] + alpha*p[j];
            r[j] = r[j] - alpha*q[j];
	}

/*---------------------------------------------------------------------
c  rho = r.r
c  Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/ #pragma omp for reduction(+:rho) for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + r[j]*r[j]; } /*-------------------------------------------------------------------- c Obtain beta: c-------------------------------------------------------------------*/ #pragma omp single beta = rho / rho0; /*-------------------------------------------------------------------- c p = r + beta*p c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { p[j] = r[j] + beta*p[j]; } } /* end of do cgit=1,cgitmax */ /*--------------------------------------------------------------------- c Compute residual norm explicitly: ||r|| = ||x - A.z|| c First, form A.z c The partition submatrix-vector multiply c---------------------------------------------------------------------*/ #pragma omp single nowait sum = 0.0; #pragma omp for private(d, k) for (j = 1; j <= lastrow-firstrow+1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) { d = d + a[k]*z[colidx[k]]; } w[j] = d; } #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { r[j] = w[j]; } /*-------------------------------------------------------------------- c At this point, r contains A.z c-------------------------------------------------------------------*/ #pragma omp for reduction(+:sum) private(d) for (j = 1; j <= lastcol-firstcol+1; j++) { d = x[j] - r[j]; sum = sum + d*d; } #pragma omp single { (*rnorm) = sqrt(sum); } /* end single */ } /*--------------------------------------------------------------------- c generate the test problem for benchmark 6 c makea generates a sparse matrix with a c prescribed sparsity distribution c c parameter type usage c c input c c n i number of cols/rows of matrix c nz i nonzeros as declared array size c rcond r*8 condition number c shift r*8 main diagonal shift c c output c c a r*8 array for nonzeros c colidx i col indices c rowstr i row 
pointers
c
c       workspace
c
c       iv, arow, acol i
c       v, aelt        r*8
c---------------------------------------------------------------------*/
static void makea(
    int n,
    int nz,
    double a[],		/* a[1:nz] */
    int colidx[],	/* colidx[1:nz] */
    int rowstr[],	/* rowstr[1:n+1] */
    int nonzer,
    int firstrow,
    int lastrow,
    int firstcol,
    int lastcol,
    double rcond,
    int arow[],		/* arow[1:nz] */
    int acol[],		/* acol[1:nz] */
    double aelt[],	/* aelt[1:nz] */
    double v[],		/* v[1:n+1] */
    int iv[],		/* iv[1:2*n+1] */
    double shift )
{
    int i, nnza, iouter, ivelt, ivelt1, irow, nzv;

/*--------------------------------------------------------------------
c      nonzer is approximately  (int(sqrt(nnza /n)));
c-------------------------------------------------------------------*/

    double size, ratio, scale;
    int jcol;

    size = 1.0;
    /* geometric decay of outer-product weights so the spectrum is
       conditioned by rcond */
    ratio = pow(rcond, (1.0 / (double)n));
    nnza = 0;

/*---------------------------------------------------------------------
c  Initialize colidx(n+1 .. 2n) to zero.
c  Used by sprnvc to mark nonzero positions
c---------------------------------------------------------------------*/
#pragma omp parallel for
    for (i = 1; i <= n; i++) {
	colidx[n+i] = 0;
    }

    /* For each outer iteration, generate a sparse random vector and
       accumulate the (weighted) outer product v*v' as COO triples
       (arow, acol, aelt), keeping only entries inside this partition. */
    for (iouter = 1; iouter <= n; iouter++) {
	nzv = nonzer;
	sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
	vecset(n, v, iv, &nzv, iouter, 0.5);
	for (ivelt = 1; ivelt <= nzv; ivelt++) {
	    jcol = iv[ivelt];
	    if (jcol >= firstcol && jcol <= lastcol) {
		scale = size * v[ivelt];
		for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
	            irow = iv[ivelt1];
                    if (irow >= firstrow && irow <= lastrow) {
			nnza = nnza + 1;
			if (nnza > nz) {
			    printf("Space for matrix elements exceeded in"
				   " makea\n");
			    printf("nnza, nzmax = %d, %d\n", nnza, nz);
			    printf("iouter = %d\n", iouter);
			    exit(1);
			}
			acol[nnza] = jcol;
			arow[nnza] = irow;
			aelt[nnza] = v[ivelt1] * scale;
		    }
		}
	    }
	}
	size = size * ratio;
    }

/*---------------------------------------------------------------------
c       ... add the identity * rcond to the generated matrix to bound
c           the smallest eigenvalue from below by rcond
c---------------------------------------------------------------------*/
    for (i = firstrow; i <= lastrow; i++) {
	if (i >= firstcol && i <= lastcol) {
	    iouter = n + i;
	    nnza = nnza + 1;
	    if (nnza > nz) {
		printf("Space for matrix elements exceeded in makea\n");
		printf("nnza, nzmax = %d, %d\n", nnza, nz);
		printf("iouter = %d\n", iouter);
		exit(1);
	    }
	    acol[nnza] = i;
	    arow[nnza] = i;
	    aelt[nnza] = rcond - shift;
	}
    }

/*---------------------------------------------------------------------
c       ... make the sparse matrix from list of elements with duplicates
c           (v and iv are used as workspace)
c---------------------------------------------------------------------*/
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
	   firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}

/*---------------------------------------------------
c       generate a sparse matrix from a list of
c       [col, row, element] tri
c
c       Duplicate (row,col) entries in the triple list are summed.
c---------------------------------------------------*/
static void sparse(
    double a[],		/* a[1:*] */
    int colidx[],	/* colidx[1:*] */
    int rowstr[],	/* rowstr[1:*] */
    int n,
    int arow[],		/* arow[1:*] */
    int acol[],		/* acol[1:*] */
    double aelt[],	/* aelt[1:*] */
    int firstrow,
    int lastrow,
    double x[],		/* x[1:n] */
    boolean mark[],	/* mark[1:n] */
    int nzloc[],	/* nzloc[1:n] */
    int nnza)

/*---------------------------------------------------------------------
c       rows range from firstrow to lastrow
c       the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
    int nrows;
    int i, j, jajp1, nza, k, nzrow;
    double xi;

/*--------------------------------------------------------------------
c    how many rows of result
c-------------------------------------------------------------------*/
    nrows = lastrow - firstrow + 1;

/*--------------------------------------------------------------------
c     ...count the number of triples in each row
c-------------------------------------------------------------------*/
#pragma omp parallel for
    for (j = 1; j <= n; j++) {
	rowstr[j] = 0;
	mark[j] = FALSE;
    }
    rowstr[n+1] = 0;

    /* histogram of rows, shifted by one so the prefix sum below yields
       row start offsets */
    for (nza = 1; nza <= nnza; nza++) {
	j = (arow[nza] - firstrow + 1) + 1;
	rowstr[j] = rowstr[j] + 1;
    }

    rowstr[1] = 1;
    for (j = 2; j <= nrows+1; j++) {
	rowstr[j] = rowstr[j] + rowstr[j-1];
    }

/*---------------------------------------------------------------------
c     ... rowstr(j) now is the location of the first nonzero
c           of row j of a
c---------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     ... do a bucket sort of the triples on the row index
c-------------------------------------------------------------------*/
    for (nza = 1; nza <= nnza; nza++) {
	j = arow[nza] - firstrow + 1;
	k = rowstr[j];
	a[k] = aelt[nza];
	colidx[k] = acol[nza];
	rowstr[j] = rowstr[j] + 1;	/* bump the insertion cursor */
    }

/*--------------------------------------------------------------------
c       ... rowstr(j) now points to the first element of row j+1
c           (shift the pointers back down by one row)
c-------------------------------------------------------------------*/
    for (j = nrows; j >= 1; j--) {
	rowstr[j+1] = rowstr[j];
    }
    rowstr[1] = 1;

/*--------------------------------------------------------------------
c       ... generate the actual output rows by adding elements
c           (x accumulates duplicate entries; mark/nzloc track which
c           columns of the current row are occupied)
c-------------------------------------------------------------------*/
    nza = 0;
#pragma omp parallel for
    for (i = 1; i <= n; i++) {
	x[i] = 0.0;
	mark[i] = FALSE;
    }

    jajp1 = rowstr[1];
    for (j = 1; j <= nrows; j++) {
	nzrow = 0;

/*--------------------------------------------------------------------
c          ...loop over the jth row of a
c-------------------------------------------------------------------*/
	for (k = jajp1; k < rowstr[j+1]; k++) {
            i = colidx[k];
            x[i] = x[i] + a[k];
	    if ( mark[i] == FALSE && x[i] != 0.0) {
		mark[i] = TRUE;
		nzrow = nzrow + 1;
		nzloc[nzrow] = i;
	    }
	}

/*--------------------------------------------------------------------
c          ... extract the nonzeros of this row
c-------------------------------------------------------------------*/
	for (k = 1; k <= nzrow; k++) {
            i = nzloc[k];
            mark[i] = FALSE;
            xi = x[i];
            x[i] = 0.0;
            if (xi != 0.0) {
		nza = nza + 1;
		a[nza] = xi;
		colidx[nza] = i;
	    }
	}
	jajp1 = rowstr[j+1];
	rowstr[j+1] = nza + rowstr[1];
    }
}

/*---------------------------------------------------------------------
c       generate a sparse n-vector (v, iv)
c       having nzv nonzeros
c
c       mark(i) is set to 1 if position i is nonzero.
c       mark is all zero on entry and is reset to all zero before exit
c       this corrects a performance bug found by John G. Lewis, caused by
c       reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
static void sprnvc(
    int n,
    int nz,
    double v[],		/* v[1:*] */
    int iv[],		/* iv[1:*] */
    int nzloc[],	/* nzloc[1:n] */
    int mark[] ) 	/* mark[1:n] */
{
    int nn1;
    int nzrow, nzv, ii, i;
    double vecelt, vecloc;

    nzv = 0;
    nzrow = 0;
    nn1 = 1;
    do {
	nn1 = 2 * nn1;
    } while (nn1 < n);

/*--------------------------------------------------------------------
c    nn1 is the smallest power of two not less than n
c-------------------------------------------------------------------*/

    while (nzv < nz) {
	vecelt = randlc(&tran, amult);

/*--------------------------------------------------------------------
c   generate an integer between 1 and n in a portable manner
c-------------------------------------------------------------------*/
	vecloc = randlc(&tran, amult);
	i = icnvrt(vecloc, nn1) + 1;
	if (i > n) continue;

/*--------------------------------------------------------------------
c  was this integer generated already?
c-------------------------------------------------------------------*/
	if (mark[i] == 0) {
	    mark[i] = 1;
	    nzrow = nzrow + 1;
	    nzloc[nzrow] = i;
	    nzv = nzv + 1;
	    v[nzv] = vecelt;
	    iv[nzv] = i;
	}
    }

    /* reset only the touched marks (see note above about the Lewis
       performance bug) */
    for (ii = 1; ii <= nzrow; ii++) {
	i = nzloc[ii];
	mark[i] = 0;
    }
}

/*---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
static int icnvrt(double x, int ipwr2) {
    return ((int)(ipwr2 * x));
}

/*--------------------------------------------------------------------
c       set ith element of sparse vector (v, iv) with
c       nzv nonzeros to val
c       (appends a new entry if position i is not already present)
c-------------------------------------------------------------------*/
static void vecset(
    int n,
    double v[],		/* v[1:*] */
    int iv[],		/* iv[1:*] */
    int *nzv,
    int i,
    double val)
{
    int k;
    boolean set;

    set = FALSE;
    for (k = 1; k <= *nzv; k++) {
	if (iv[k] == i) {
            v[k] = val;
            set = TRUE;
	}
    }
    if (set == FALSE) {
	*nzv = *nzv + 1;
	v[*nzv] = val;
	iv[*nzv] = i;
    }
}
organismsbuffer.h
#pragma once #ifndef ORGANISMSBUFFER_H__ #define ORGANISMSBUFFER_H__ #include "organism.h" #include "rng.h" #include <assert.h> namespace NEAT { template<typename TOrganism = Organism> class OrganismsBuffer { size_t _n; std::vector<TOrganism> _a; std::vector<TOrganism> _b; std::vector<TOrganism> *_curr; std::vector<TOrganism> *_prev; public: OrganismsBuffer(rng_t rng, std::vector<std::unique_ptr<Genome>> &seeds, size_t n, size_t population_index = 0) : _n(n) { _a.reserve(n); _b.reserve(n); _curr = &_a; _prev = &_b; for(size_t i = 0; i < n; i++) { _a.emplace_back(*seeds[i + population_index]); size_t ipop = i + population_index; _a[i].population_index = ipop; _a[i].net->population_index = ipop; _a[i].genome->genome_id = ipop; _a[i].genome->rng.seed(rng.integer()); } for(size_t i = 0; i < n; i++) { _b.emplace_back(*seeds[i + population_index]); size_t ipop = i + population_index; _b[i].population_index = ipop; _b[i].net->population_index = ipop; _b[i].genome->genome_id = ipop; _b[i].genome->rng.seed(rng.integer()); } } void init_phenotypes() { #pragma omp parallel for for(size_t i = 0; i < _n; i++) { Organism &org = curr()[i]; org.genome->init_phenotype(*org.net); } } size_t size(){ return _n; } std::vector<TOrganism> &curr() { return *_curr; } std::vector<TOrganism> &prev() { return *_prev; } void next_generation(int generation) { if(_curr == &_a) { _curr = &_b; _prev = &_a; } else { _curr = &_a; _prev = &_b; } assert( _curr->size() == _n ); for(TOrganism &org: curr()) org.init(generation); } }; } #endif // #ifndef ORGANISMSBUFFER_H__
GB_unop__asinh_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asinh_fp32_fp32)
// op(A') function:  GB (_unop_tran__asinh_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = asinhf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = asinhf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = asinhf (z) ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise Cx [p] = asinhf (Ax [p]); Ab (if non-NULL) is the bitmap of
// valid entries, and entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__asinh_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinhf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros above.
GrB_Info GB (_unop_tran__asinh_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
LPfold.c
/* * local pair probabilities for RNA secondary structures * * Stephan Bernhart, Ivo L Hofacker * Vienna RNA package */ /* * todo: compute energy z-score for each window * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> /* #defines FLT_MAX ... */ #include "ViennaRNA/datastructures/basic.h" #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/plotting/probabilities.h" #include "ViennaRNA/part_func.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/LPfold.h" #include "ViennaRNA/Lfold.h" #include "ViennaRNA/alphabet.h" #include "ViennaRNA/part_func_window.h" /* ################################# # GLOBAL VARIABLES # ################################# */ typedef struct { int bpp_print; /* 1 if pairing probabilities should be written to file-handle, 0 if they are returned as vrna_ep_t */ int up_print; /* 1 if unpaired probabilities should be written to file-handle, 0 if they are returned as array */ FILE *fp_pU; double **pU; FLT_OR_DBL bpp_cutoff; FILE *fp_bpp; vrna_ep_t *bpp; unsigned int bpp_max_size; unsigned int bpp_size; vrna_ep_t *stack_prob; unsigned int stack_prob_size; unsigned int stack_prob_max_size; } default_cb_data; typedef struct { FLT_OR_DBL *prml; FLT_OR_DBL *prm_l; FLT_OR_DBL *prm_l1; double **pU; double **pUO; double **pUI; double **pUM; double **pUH; } helper_arrays; /* soft constraint contributions function (interior-loops) */ typedef FLT_OR_DBL (sc_int)(vrna_fold_compound_t *, int, int, int, int); /* QI5 contribution function for unpaired probability computations */ typedef void (add_QI5)(FLT_OR_DBL **, int, int, FLT_OR_DBL, FLT_OR_DBL); /* ################################# # PRIVATE VARIABLES # ################################# */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY #ifdef _OPENMP #include <omp.h> #endif /* some backward 
compatibility stuff */ PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; PRIVATE int backward_compat = 0; #ifdef _OPENMP #pragma omp threadprivate(backward_compat_compound, backward_compat) #endif #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE void alloc_helper_arrays(vrna_fold_compound_t *vc, int ulength, helper_arrays *aux_arrays, unsigned int options); PRIVATE void free_helper_arrays(vrna_fold_compound_t *vc, int ulength, helper_arrays *aux_arrays, unsigned int options); PRIVATE void compute_probs(vrna_fold_compound_t *vc, int j, helper_arrays *aux_arrays, int ulength, vrna_probs_window_callback *cb, void *data, unsigned int options, int *ov); PRIVATE void make_ptypes(vrna_fold_compound_t *vc, int j); PRIVATE void probability_correction(vrna_fold_compound_t *vc, int i); #if 0 PRIVATE vrna_ep_t * get_deppp(vrna_fold_compound_t *vc, vrna_ep_t *pl, int start); #endif PRIVATE void compute_pU(vrna_fold_compound_t *vc, int k, int ulength, helper_arrays *aux_arrays, vrna_probs_window_callback *cb, void *data, unsigned int options); PRIVATE FLT_OR_DBL * compute_stack_probabilities(vrna_fold_compound_t *vc, int start); PRIVATE void return_pU(int size, int i, int max_size, helper_arrays *aux_arrays, vrna_probs_window_callback *cb, void *data, unsigned int options); PRIVATE void print_bpp_callback(FLT_OR_DBL *pr, int size, int k, void *data); PRIVATE void store_bpp_callback(FLT_OR_DBL *pr, int size, int k, void *data); #if 0 PRIVATE void store_stack_prob_callback(FLT_OR_DBL *pr, int size, int k, void *data); #endif PRIVATE void print_pU_callback(double *pU, int size, int k, int ulength, unsigned int type, void *data); PRIVATE void store_pU_callback(double *pU, int size, int k, int ulength, unsigned int type, void *data); PRIVATE void backward_compat_callback(FLT_OR_DBL *pr, int pr_size, int i, int max, unsigned int type, void *data); PRIVATE FLT_OR_DBL 
sc_contribution(vrna_fold_compound_t *vc, int i, int j, int k, int l); PRIVATE FLT_OR_DBL sc_dummy(vrna_fold_compound_t *vc, int i, int j, int k, int l); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PUBLIC vrna_ep_t * vrna_pfl_fold(const char *sequence, int window_size, int max_bp_span, float cutoff) { default_cb_data data; data.fp_pU = NULL; data.pU = NULL; data.bpp_cutoff = (FLT_OR_DBL)cutoff; data.fp_bpp = NULL; data.bpp = NULL; data.bpp_max_size = 0; data.bpp_size = 0; data.stack_prob = NULL; data.stack_prob_max_size = 0; data.stack_prob_size = 0; data.bpp_print = 0; data.up_print = 0; vrna_pfl_fold_cb(sequence, window_size, max_bp_span, &backward_compat_callback, (void *)&data); /* resize pair probability list to actual size */ data.bpp = (vrna_ep_t *)vrna_realloc(data.bpp, sizeof(vrna_ep_t) * (data.bpp_size + 1)); data.bpp[data.bpp_size].i = 0; data.bpp[data.bpp_size].j = 0; data.bpp[data.bpp_size].type = VRNA_PLIST_TYPE_BASEPAIR; data.bpp[data.bpp_size].p = 0; return data.bpp; } PUBLIC double ** vrna_pfl_fold_up(const char *sequence, int ulength, int window_size, int max_bp_span) { unsigned int i; double **pU; default_cb_data data; pU = NULL; if (sequence) { i = strlen(sequence); pU = (double **)vrna_alloc(sizeof(double *) * (i + 2)); data.fp_pU = NULL; data.pU = pU; data.bpp_cutoff = 0.; data.fp_bpp = NULL; data.bpp = NULL; data.bpp_max_size = 0; data.bpp_size = 0; data.stack_prob = NULL; data.stack_prob_max_size = 0; data.stack_prob_size = 0; data.bpp_print = 0; data.up_print = 0; vrna_pfl_fold_up_cb(sequence, ulength, window_size, max_bp_span, &backward_compat_callback, (void *)&data); } return pU; } PRIVATE void alloc_helper_arrays(vrna_fold_compound_t *vc, int ulength, helper_arrays *aux_arrays, unsigned int options) { int i, n; n = vc->length; aux_arrays->pU = NULL; aux_arrays->pUO = NULL; aux_arrays->pUH = NULL; aux_arrays->pUI = NULL; aux_arrays->pUM = NULL; aux_arrays->prm_l = 
(FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2)); aux_arrays->prm_l1 = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2)); aux_arrays->prml = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2)); if ((options & VRNA_PROBS_WINDOW_UP) && (ulength > 0)) { aux_arrays->pU = (double **)vrna_alloc((n + 1) * sizeof(double *)); for (i = 1; i <= n; i++) aux_arrays->pU[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double)); if (options & VRNA_PROBS_WINDOW_UP_SPLIT) { aux_arrays->pUO = (double **)vrna_alloc((n + 1) * sizeof(double *)); aux_arrays->pUI = (double **)vrna_alloc((n + 1) * sizeof(double *)); aux_arrays->pUM = (double **)vrna_alloc((n + 1) * sizeof(double *)); aux_arrays->pUH = (double **)vrna_alloc((n + 1) * sizeof(double *)); for (i = 1; i <= n; i++) { aux_arrays->pUH[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double)); aux_arrays->pUI[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double)); aux_arrays->pUO[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double)); aux_arrays->pUM[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double)); } } } } PRIVATE void free_helper_arrays(vrna_fold_compound_t *vc, int ulength, helper_arrays *aux_arrays, unsigned int options) { int i, n; n = vc->length; free(aux_arrays->prm_l); free(aux_arrays->prm_l1); free(aux_arrays->prml); if ((options & VRNA_PROBS_WINDOW_UP) && (ulength > 0)) { for (i = 1; i <= n; i++) free(aux_arrays->pU[i]); free(aux_arrays->pU); if (options & VRNA_PROBS_WINDOW_UP_SPLIT) { for (i = 1; i <= n; i++) { free(aux_arrays->pUH[i]); free(aux_arrays->pUI[i]); free(aux_arrays->pUO[i]); free(aux_arrays->pUM[i]); } free(aux_arrays->pUH); free(aux_arrays->pUI); free(aux_arrays->pUO); free(aux_arrays->pUM); } } } PRIVATE void return_pU(int size, int i, int max_size, helper_arrays *aux_arrays, vrna_probs_window_callback *cb, void *data, unsigned int options) { if (options & VRNA_PROBS_WINDOW_UP_SPLIT) { 
cb(aux_arrays->pUO[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_EXT_LOOP, data); cb(aux_arrays->pUH[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_HP_LOOP, data); cb(aux_arrays->pUI[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_INT_LOOP, data); cb(aux_arrays->pUM[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_MB_LOOP, data); } else { cb(aux_arrays->pU[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_ANY_LOOP, data); } } PRIVATE INLINE void allocate_dp_matrices(vrna_fold_compound_t *vc, int i, unsigned int options) { char **ptype; int winSize; FLT_OR_DBL **pR, **q, **qb, **qm, **qm2, **QI5, **qmb, **q2l; vrna_mx_pf_t *mx; vrna_hc_t *hc; vrna_sc_t *sc; mx = vc->exp_matrices; pR = mx->pR; q = mx->q_local; qb = mx->qb_local; qm = mx->qm_local; qm2 = mx->qm2_local; QI5 = mx->QI5; qmb = mx->qmb; q2l = mx->q2l; ptype = vc->ptype_local; winSize = vc->window_size; hc = vc->hc; /* allocate new part of arrays */ pR[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); pR[i] -= i; q[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); q[i] -= i; qb[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); qb[i] -= i; qm[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); qm[i] -= i; if (options & VRNA_PROBS_WINDOW_UP) { qm2[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); qm2[i] -= i; QI5[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); qmb[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); q2l[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); } hc->matrix_local[i] = (unsigned char *)vrna_alloc((winSize + 1) * sizeof(unsigned char)); ptype[i] = (char *)vrna_alloc((winSize + 1) * sizeof(char)); ptype[i] -= i; switch (vc->type) { case VRNA_FC_TYPE_SINGLE: sc = vc->sc; if (sc) { if (sc->exp_energy_bp_local) sc->exp_energy_bp_local[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); if (sc->exp_energy_up) sc->exp_energy_up[i] = 
(FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL)); vrna_sc_update(vc, i, VRNA_OPTION_PF | VRNA_OPTION_WINDOW); } break; case VRNA_FC_TYPE_COMPARATIVE: break; } } PRIVATE INLINE void free_dp_matrices(vrna_fold_compound_t *vc, unsigned int options) { char **ptype; int i, n, winSize; FLT_OR_DBL **pR, **q, **qb, **qm, **qm2, **QI5, **qmb, **q2l; vrna_mx_pf_t *mx; vrna_hc_t *hc; vrna_sc_t *sc; n = (int)vc->length; winSize = vc->window_size; mx = vc->exp_matrices; pR = mx->pR; q = mx->q_local; qb = mx->qb_local; qm = mx->qm_local; ptype = vc->ptype_local; hc = vc->hc; sc = vc->sc; for (i = MAX2(1, n - (winSize + MAXLOOP)); i <= n; i++) { free(pR[i] + i); free(q[i] + i); free(qb[i] + i); free(qm[i] + i); pR[i] = NULL; q[i] = NULL; qb[i] = NULL; qm[i] = NULL; if (options & VRNA_PROBS_WINDOW_UP) { qm2 = mx->qm2_local; QI5 = mx->QI5; qmb = mx->qmb; q2l = mx->q2l; free(qm2[i] + i); free(QI5[i]); free(qmb[i]); free(q2l[i]); qm2[i] = NULL; QI5[i] = NULL; qmb[i] = NULL; q2l[i] = NULL; } free(hc->matrix_local[i]); hc->matrix_local[i] = NULL; free(ptype[i] + i); ptype[i] = NULL; if (sc) { if (sc->exp_energy_up) free(sc->exp_energy_up[i]); if (sc->exp_energy_bp_local) free(sc->exp_energy_bp_local[i]); } } } PRIVATE INLINE void init_dp_matrices(vrna_fold_compound_t *vc, unsigned int options) { int j, max_j, winSize; winSize = vc->window_size; max_j = MIN2((int)vc->length, 2 * winSize + MAXLOOP + 2); for (j = 1; j <= max_j; j++) allocate_dp_matrices(vc, j, options); } PRIVATE INLINE void rotate_dp_matrices(vrna_fold_compound_t *vc, int j, unsigned int options) { char **ptype; int i, winSize, length; FLT_OR_DBL **pR, **q, **qb, **qm, **qm2, **QI5, **qmb, **q2l; vrna_mx_pf_t *mx; vrna_hc_t *hc; vrna_sc_t *sc; length = vc->length; winSize = vc->window_size; mx = vc->exp_matrices; pR = mx->pR; q = mx->q_local; qb = mx->qb_local; qm = mx->qm_local; ptype = vc->ptype_local; hc = vc->hc; sc = vc->sc; if (j > 2 * winSize + MAXLOOP + 1) { i = j - (2 * winSize + MAXLOOP + 1); /* 
free arrays may be faster than pointer rotation and reset to 0 values */ free(pR[i] + i); free(q[i] + i); free(qb[i] + i); free(qm[i] + i); pR[i] = NULL; q[i] = NULL; qb[i] = NULL; qm[i] = NULL; if (options & VRNA_PROBS_WINDOW_UP) { qm2 = mx->qm2_local; QI5 = mx->QI5; qmb = mx->qmb; q2l = mx->q2l; free(qm2[i] + i); free(QI5[i]); free(qmb[i]); free(q2l[i]); qm2[i] = NULL; QI5[i] = NULL; qmb[i] = NULL; q2l[i] = NULL; } free(hc->matrix_local[i]); hc->matrix_local[i] = NULL; free(ptype[i] + i); ptype[i] = NULL; if (sc) { if (sc->exp_energy_up) { free(sc->exp_energy_up[i]); sc->exp_energy_up[i] = NULL; } if (sc->exp_energy_bp_local) { free(sc->exp_energy_bp_local[i]); sc->exp_energy_bp_local[i] = NULL; } } if (j + 1 <= length) /* get arrays for next round */ allocate_dp_matrices(vc, j + 1, options); } } PRIVATE INLINE void init_constraints(vrna_fold_compound_t *fc, unsigned int options) { int j, max_j, winSize; winSize = fc->window_size; max_j = MIN2((int)fc->length, 2 * winSize + MAXLOOP + 2); for (j = 1; j <= max_j; j++) { make_ptypes(fc, j); vrna_hc_update(fc, j); vrna_sc_update(fc, j, VRNA_OPTION_PF | VRNA_OPTION_WINDOW); } } PRIVATE INLINE void rotate_constraints(vrna_fold_compound_t *fc, int j, unsigned int options) { if (j + 1 <= fc->length) { make_ptypes(fc, j + 1); vrna_hc_update(fc, j + 1); vrna_sc_update(fc, j + 1, VRNA_OPTION_PF | VRNA_OPTION_WINDOW); } } PUBLIC int vrna_probs_window(vrna_fold_compound_t *vc, int ulength, unsigned int options, vrna_probs_window_callback *cb, void *data) { unsigned char hc_decompose; int n, i, j, k, maxl, ov, winSize, pairSize, turn; FLT_OR_DBL temp, Qmax, qbt1, **q, **qb, **qm, **qm2, **pR; double max_real, *Fwindow; vrna_exp_param_t *pf_params; vrna_md_t *md; vrna_mx_pf_t *matrices; vrna_hc_t *hc; helper_arrays aux_arrays; vrna_mx_pf_aux_el_t aux_mx_el; vrna_mx_pf_aux_ml_t aux_mx_ml; ov = 0; Qmax = 0; if ((!vc) || (!cb)) return 0; /* failure */ if (!vrna_fold_compound_prepare(vc, VRNA_OPTION_PF | VRNA_OPTION_WINDOW)) { 
vrna_message_warning("vrna_probs_window: " "Failed to prepare vrna_fold_compound"); return 0; /* failure */ } /* here space for initializing everything */ n = vc->length; pf_params = vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; winSize = vc->window_size; pairSize = md->max_bp_span; turn = md->min_loop_size; q = matrices->q_local; qb = matrices->qb_local; qm = matrices->qm_local; qm2 = matrices->qm2_local; pR = matrices->pR; hc = vc->hc; alloc_helper_arrays(vc, ulength, &aux_arrays, options); Fwindow = (options & VRNA_PROBS_WINDOW_PF) ? (double *)vrna_alloc(sizeof(double) * (winSize + 1)) : NULL; /* very short molecule ? */ if (n < turn + 2) { if ((options & VRNA_PROBS_WINDOW_UP) && (ulength > 0)) { for (i = 1; i <= n; i++) { maxl = MIN2(MAX2(MAXLOOP, ulength), n); if (options & VRNA_PROBS_WINDOW_UP_SPLIT) { for (j = 0; j <= maxl; j++) { aux_arrays.pUO[i][j] = 1.; aux_arrays.pUH[i][j] = 0.; aux_arrays.pUI[i][j] = 0.; aux_arrays.pUM[i][j] = 0.; } } else { for (j = 0; j <= maxl; j++) aux_arrays.pU[i][j] = 1.; } return_pU(maxl, i, ulength, &aux_arrays, cb, data, options); } } free_helper_arrays(vc, ulength, &aux_arrays, options); return 1; /* success */ } init_dp_matrices(vc, options); init_constraints(vc, options); /* init auxiliary arrays for fast exterior/multibranch loops */ aux_mx_el = vrna_exp_E_ext_fast_init(vc); aux_mx_ml = vrna_exp_E_ml_fast_init(vc); max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? 
FLT_MAX : DBL_MAX; /* start recursions */ for (j = turn + 2; j <= n + winSize; j++) { if (j <= n) { vrna_exp_E_ext_fast_update(vc, j, aux_mx_el); for (i = j - turn - 1; i >= MAX2(1, (j - winSize + 1)); i--) { hc_decompose = hc->matrix_local[i][j - i]; qbt1 = 0.; /* * construction of partition function of segment i,j * firstly that given i bound to j : qb(i,j) */ if (hc_decompose) { /* process hairpin loop(s) */ qbt1 += vrna_exp_E_hp_loop(vc, i, j); /* process interior loop(s) */ qbt1 += vrna_exp_E_int_loop(vc, i, j); /* process multibranch loop(s) */ qbt1 += vrna_exp_E_mb_loop_fast(vc, i, j, aux_mx_ml); } qb[i][j] = qbt1; /* Multibranch loop */ qm[i][j] = vrna_exp_E_ml_fast(vc, i, j, aux_mx_ml); if ((options & VRNA_PROBS_WINDOW_UP) && (ulength > 0)) { /* new qm2 computation done here */ const FLT_OR_DBL *qqm = vrna_exp_E_ml_fast_qqm(aux_mx_ml); temp = 0.0; for (k = i + 1; k <= j; k++) temp += qm[i][k - 1] * qqm[k]; qm2[i][j] = temp; } /* Exterior loop */ q[i][j] = temp = vrna_exp_E_ext_fast(vc, i, j, aux_mx_el); if (temp > Qmax) { Qmax = temp; if (Qmax > max_real / 10.) 
{ vrna_message_warning("vrna_probs_window: " "Q close to overflow: %d %d %g\n", i, j, temp); } } if (temp >= max_real) { vrna_message_warning("vrna_probs_window: " "overflow while computing partition function for segment q[%d,%d]\n" "use larger pf_scale", i, j); vrna_exp_E_ml_fast_free(aux_mx_ml); vrna_exp_E_ext_fast_free(aux_mx_el); free_helper_arrays(vc, ulength, &aux_arrays, options); return 0; /* failure */ } } /* end for i */ /* * here we return the partition function for subsegments [i...j] in terms * of ensemble free energies G_ij = -RT * ln(Q_ij) in kcal/mol */ if (options & VRNA_PROBS_WINDOW_PF) { int start = MAX2(1, j - winSize + 1); Fwindow -= start; for (i = start; i <= j; i++) Fwindow[i] = (double)(-log(q[i][j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT / 1000.0; cb(Fwindow, j, start, winSize, VRNA_PROBS_WINDOW_PF, data); Fwindow += start; } /* * just as a general service, I save here the free energy of the windows * no output is generated, however,... */ if ((j >= winSize) && (options & VRNA_PROBS_WINDOW_UP)) { FLT_OR_DBL eee = 0.; eee = (FLT_OR_DBL)(-log(q[j - winSize + 1][j]) - winSize * log(pf_params->pf_scale)) * pf_params->kT / 1000.0; /* we could return this to the user via callback cb() if we were nice */ aux_arrays.pU[j][0] = eee; } /* rotate auxiliary arrays */ vrna_exp_E_ext_fast_rotate(aux_mx_el); vrna_exp_E_ml_fast_rotate(aux_mx_ml); } if (j > winSize) { compute_probs(vc, j, &aux_arrays, ulength, cb, data, options, &ov); if ((options & VRNA_PROBS_WINDOW_UP) && (j > winSize + MAXLOOP + 1)) compute_pU(vc, j - winSize - MAXLOOP - 1, ulength, &aux_arrays, cb, data, options); if (j > 2 * winSize + MAXLOOP + 1) { int start = j - (2 * winSize + MAXLOOP + 1); probability_correction(vc, start); if (options & VRNA_PROBS_WINDOW_BPP) { cb(pR[start], MIN2(start + winSize, n), start, winSize, VRNA_PROBS_WINDOW_BPP, data); } if (options & VRNA_PROBS_WINDOW_STACKP) { int start = j - (2 * winSize - MAXLOOP); if (start > 1) { FLT_OR_DBL 
*stack_probs = compute_stack_probabilities(vc, start); stack_probs -= start + 1; cb(stack_probs, MIN2(n - start + turn, pairSize), start, winSize, VRNA_PROBS_WINDOW_STACKP, data); stack_probs += start + 1; free(stack_probs); } } rotate_dp_matrices(vc, j, options); rotate_constraints(vc, j, options); } } /* end if (do_backtrack) */ } /* end for j */ /* finish output */ if (options & VRNA_PROBS_WINDOW_UP) for (j = MAX2(1, n - MAXLOOP); j <= n; j++) compute_pU(vc, j, ulength, &aux_arrays, cb, data, options); for (j = MAX2(n - winSize - MAXLOOP, 1); j <= n; j++) { probability_correction(vc, j); if (options & VRNA_PROBS_WINDOW_BPP) { cb(pR[j], MIN2(j + winSize, n), j, winSize, VRNA_PROBS_WINDOW_BPP, data); } if ((options & VRNA_PROBS_WINDOW_STACKP) && j < n) { int start = j; if (start > 1) { FLT_OR_DBL *stack_probs = compute_stack_probabilities(vc, start); stack_probs -= start + 1; cb(stack_probs, MIN2(n - start + turn, pairSize), start, winSize, VRNA_PROBS_WINDOW_STACKP, data); stack_probs += start + 1; free(stack_probs); } } } if (ov > 0) { vrna_message_warning("vrna_probs_window: " "%d overflows occurred while backtracking;\n" "you might try a smaller pf_scale than %g\n", ov, pf_params->pf_scale); } free_dp_matrices(vc, options); free_helper_arrays(vc, ulength, &aux_arrays, options); /* free memory occupied by auxiliary arrays for fast exterior/multibranch loops */ vrna_exp_E_ml_fast_free(aux_mx_ml); vrna_exp_E_ext_fast_free(aux_mx_el); free(Fwindow); return 1; /* success */ } PRIVATE FLT_OR_DBL sc_contribution(vrna_fold_compound_t *vc, int i, int j, int k, int l) { FLT_OR_DBL q; vrna_sc_t *sc; q = 1.; sc = vc->sc; if (sc->exp_energy_up) q *= sc->exp_energy_up[i + 1][k - i - 1] * sc->exp_energy_up[l + 1][j - l - 1]; if (sc->exp_energy_bp_local) q *= sc->exp_energy_bp_local[i][j - i]; if ((sc->exp_energy_stack) && (i + 1 == k) && (l + 1 == j)) { q *= sc->exp_energy_stack[i] * sc->exp_energy_stack[k] * sc->exp_energy_stack[l] * sc->exp_energy_stack[j]; } if (sc->f) q 
*= sc->f(i, j, k, l, VRNA_DECOMP_PAIR_IL, sc->data); return q; } PRIVATE FLT_OR_DBL sc_dummy(vrna_fold_compound_t *vc, int i, int j, int k, int l) { return 1.; } PRIVATE void add_QI5_contribution(FLT_OR_DBL **QI5, int i, int j, FLT_OR_DBL q, FLT_OR_DBL qkl) { QI5[i][j] += q * qkl; } PRIVATE void add_QI5_dummy(FLT_OR_DBL **QI5, int i, int j, FLT_OR_DBL q, FLT_OR_DBL qkl) { return; } PRIVATE void compute_probs(vrna_fold_compound_t *vc, int j, helper_arrays *aux_arrays, int ulength, vrna_probs_window_callback *cb, void *data, unsigned int options, int *ov) { char **ptype; short *S1; int start_i, i, k, l, n, m, winSize, turn, type, type_2, tt, *rtype; FLT_OR_DBL *prml, *prm_l, *prm_l1, **pR, **QI5, **qmb, **q2l, **qb, **q, **qm, *scale, *expMLbase, expMLclosing, temp, prm_MLb, prmt1, prmt, *tmp, Qmax; double max_real; vrna_exp_param_t *pf_params; vrna_md_t *md; vrna_hc_t *hc; vrna_sc_t *sc; sc_int *sc_int_f; add_QI5 *add_QI5_f; max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX; prml = aux_arrays->prml; prm_l = aux_arrays->prm_l; prm_l1 = aux_arrays->prm_l1; n = vc->length; winSize = vc->window_size; S1 = vc->sequence_encoding; ptype = vc->ptype_local; pf_params = vc->exp_params; md = &(pf_params->model_details); turn = md->min_loop_size; rtype = &(md->rtype[0]); expMLclosing = pf_params->expMLclosing; scale = vc->exp_matrices->scale; expMLbase = vc->exp_matrices->expMLbase; hc = vc->hc; sc = vc->sc; pR = vc->exp_matrices->pR; QI5 = vc->exp_matrices->QI5; qmb = vc->exp_matrices->qmb; q2l = vc->exp_matrices->q2l; q = vc->exp_matrices->q_local; qb = vc->exp_matrices->qb_local; qm = vc->exp_matrices->qm_local; Qmax = 0; /* assign helper functions */ if (sc) sc_int_f = &sc_contribution; else sc_int_f = &sc_dummy; if (options & VRNA_PROBS_WINDOW_UP) add_QI5_f = &add_QI5_contribution; else add_QI5_f = &add_QI5_dummy; /* start recursion */ /* * i=j-winSize; * initialize multiloopfs */ for (k = j - winSize; k <= MIN2(n, j); k++) { prml[k] = 0; prm_l[k] = 0; 
/* prm_l1[k]=0; others stay */ } k = j - winSize; prm_l1[k] = 0; for (l = k + turn + 1; l <= MIN2(n, k + winSize - 1); l++) { int a; pR[k][l] = 0; /* set zero at start */ type = vrna_get_ptype_window(k, l + k, ptype); if (qb[k][l] == 0) continue; /* Exterior loop cases */ if (hc->matrix_local[k][l - k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) { for (a = MAX2(1, l - winSize + 2); a < MIN2(k, n - winSize + 2); a++) pR[k][l] += q[a][k - 1] * q[l + 1][a + winSize - 1] / q[a][a + winSize - 1]; if (l - k + 1 == winSize) { pR[k][l] += 1. / q[k][l]; } else { if (k + winSize - 1 <= n) /* k outermost */ pR[k][l] += q[l + 1][k + winSize - 1] / q[k][k + winSize - 1]; if (l - winSize + 1 >= 1) /* l outermost */ pR[k][l] += q[l - winSize + 1][k - 1] / q[l - winSize + 1][l]; } pR[k][l] *= vrna_exp_E_ext_stem(type, (k > 1) ? S1[k - 1] : -1, (l < n) ? S1[l + 1] : -1, pf_params); } if (hc->matrix_local[k][l - k] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) { FLT_OR_DBL ppp; type_2 = rtype[vrna_get_ptype_window(k, l + k, ptype)]; ppp = 0.; start_i = k - MAXLOOP - 1; if (start_i < l - winSize + 1) start_i = l - winSize + 1; if (start_i < 1) start_i = 1; int u1 = 0; short sk1, sl1, si1; sk1 = S1[k - 1]; sl1 = S1[l + 1]; for (i = k - 1; i >= start_i; i--, u1++) { int max_m = i + winSize - 1; if (hc->up_int[i + 1] < u1) break; si1 = S1[i + 1]; if (max_m > l + MAXLOOP - u1 + 1) max_m = l + MAXLOOP - u1 + 1; if (max_m > n) max_m = n; for (m = l + 1; m <= max_m; m++) { int u2 = m - l - 1; if (hc->up_int[l + 1] < u2) break; if (hc->matrix_local[i][m - i] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) { type = vrna_get_ptype_window(i, m + i, ptype); if (pR[i][m] > 0) { temp = pR[i][m] * exp_E_IntLoop(u1, u2, type, type_2, si1, S1[m - 1], sk1, sl1, pf_params) * sc_int_f(vc, i, m, k, l) * scale[u1 + u2 + 2]; add_QI5_f(QI5, i, k - i - 1, temp, qb[k][l]); add_QI5_f(QI5, l, m - l - 1, temp, qb[k][l]); ppp += temp; } } } } pR[k][l] += ppp; } } /* 3. 
bonding k,l as substem of multi-loop enclosed by i,m */ prm_MLb = 0.; if (k > 1) { /* sonst nix! */ for (l = MIN2(n - 1, k + winSize - 2); l >= k + turn + 1; l--) { FLT_OR_DBL ppp; /* opposite direction */ m = l + 1; prmt = prmt1 = 0.0; for (i = MAX2(1, l - winSize + 2); i < k - 1 /* turn */; i++) { if (hc->matrix_local[i][m - i] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) { tt = rtype[vrna_get_ptype_window(i, m + i, ptype)]; ppp = pR[i][m] * exp_E_MLstem(tt, S1[m - 1], S1[i + 1], pf_params) * qm[i + 1][k - 1]; if (sc) if (sc->exp_energy_bp_local) ppp *= sc->exp_energy_bp_local[i][m - i]; prmt += ppp; } } prmt *= expMLclosing; prml[m] = prmt; if (hc->matrix_local[k - 1][m - k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) { tt = rtype[vrna_get_ptype_window(k - 1, m + k - 1, ptype)]; prmt1 = pR[k - 1][m] * expMLclosing * exp_E_MLstem(tt, S1[l], S1[k], pf_params); if (sc) if (sc->exp_energy_bp_local) prmt1 *= sc->exp_energy_bp_local[k - 1][m - k + 1]; } /* k-1 is unpaired */ if (hc->up_ml[k - 1]) { ppp = prm_l1[m] * expMLbase[1]; if (sc) if (sc->exp_energy_up) ppp *= sc->exp_energy_up[k - 1][1]; prm_l[m] = ppp + prmt1; } else { /* skip configuration where k-1 is unpaired */ prm_l[m] = prmt1; } /* m is unpaired */ if (hc->up_ml[m]) { ppp = prm_MLb * expMLbase[1]; if (sc) if (sc->exp_energy_up) ppp *= sc->exp_energy_up[m][1]; prm_MLb = ppp + prml[m]; } else { prm_MLb = prml[m]; } /* * same as: prm_MLb = 0; * for (i=n; i>k; i--) prm_MLb += prml[i]*expMLbase[k-i-1]; */ prml[m] = prml[m] + prm_l[m]; if (qb[k][l] == 0.) continue; if (hc->matrix_local[k][l - k] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) { tt = vrna_get_ptype_window(k, l + k, ptype); if (options & VRNA_PROBS_WINDOW_UP) { double dang; /* coefficient for computations of unpairedarrays */ dang = qb[k][l] * exp_E_MLstem(tt, (k > 1) ? S1[k - 1] : -1, (l < n) ? 
S1[l + 1] : -1, pf_params) * scale[2]; for (m = MIN2(k + winSize - 2, n); m >= l + 2; m--) { qmb[l][m - l - 1] += prml[m] * dang; q2l[l][m - l - 1] += (prml[m] - prm_l[m]) * dang; } } temp = prm_MLb; for (m = MIN2(k + winSize - 2, n); m >= l + 2; m--) temp += prml[m] * qm[l + 1][m - 1]; temp *= exp_E_MLstem(tt, (k > 1) ? S1[k - 1] : -1, (l < n) ? S1[l + 1] : -1, pf_params) * scale[2]; pR[k][l] += temp; } if (pR[k][l] > Qmax) { Qmax = pR[k][l]; if (Qmax > max_real / 10.) vrna_message_warning("P close to overflow: %d %d %g %g\n", i, m, pR[k][l], qb[k][l]); } if (pR[k][l] >= max_real) { (*ov)++; pR[k][l] = FLT_MAX; } } /* end for (l=..) */ } tmp = prm_l1; aux_arrays->prm_l1 = prm_l; aux_arrays->prm_l = tmp; } PRIVATE void probability_correction(vrna_fold_compound_t *vc, int i) { int j, howoften, pairdist, turn, n, winSize; FLT_OR_DBL **qb, **pR; n = vc->length; winSize = vc->window_size; turn = vc->exp_params->model_details.min_loop_size; howoften = 0; /* how many samples do we have for this pair */ qb = vc->exp_matrices->qb_local; pR = vc->exp_matrices->pR; for (j = i + turn; j < MIN2(i + winSize, n + 1); j++) { pairdist = (j - i + 1); /* 4cases */ howoften = MIN2(winSize - pairdist + 1, i); /* pairdist,start */ howoften = MIN2(howoften, n - j + 1); /* end */ howoften = MIN2(howoften, n - winSize + 1); /* windowsize */ pR[i][j] *= qb[i][j] / howoften; } return; } PRIVATE void make_ptypes(vrna_fold_compound_t *vc, int i) { /* make new entries in ptype array */ char **ptype; const short *S; int j, type, pairSize, n; vrna_md_t *md; ptype = vc->ptype_local; md = &(vc->exp_params->model_details); pairSize = md->max_bp_span; S = vc->sequence_encoding2; n = vc->length; for (j = i; j <= MIN2(i + pairSize, n); j++) { type = md->pair[S[i]][S[j]]; ptype[i][j] = (char)type; } return; } #if 0 PRIVATE vrna_ep_t * get_deppp(vrna_fold_compound_t *vc, vrna_ep_t *pl, int start) { /* compute dependent pair probabilities */ int i, j, count = 0; double tmp; vrna_ep_t *temp; char **ptype; 
short *S1; FLT_OR_DBL **qb, *scale; int *rtype, turn, pairsize, length; vrna_exp_param_t *pf_params; S1 = vc->sequence_encoding; pf_params = vc->exp_params; ptype = vc->ptype_local; qb = vc->exp_matrices->qb_local; scale = vc->exp_matrices->scale; rtype = &(pf_params->model_details.rtype[0]); turn = pf_params->model_details.min_loop_size; pairsize = pf_params->model_details.max_bp_span; length = vc->length; temp = (vrna_ep_t *)vrna_alloc(pairsize * sizeof(vrna_ep_t)); /* holds temporary deppp */ for (j = start + turn; j < MIN2(start + pairsize, length); j++) { if ((qb[start][j] * qb[start - 1][(j + 1)]) > 10e-200) { int type = ptype[start - 1][j + 1]; int type_2 = rtype[(unsigned char)ptype[start][j]]; tmp = qb[start][j] / qb[start - 1][(j + 1)] * exp_E_IntLoop(0, 0, type, type_2, S1[start], S1[j], S1[start - 1], S1[j + 1], pf_params) * scale[2]; temp[count].i = start; temp[count].j = j; temp[count++].p = tmp; } } /* write it to list of deppps */ for (i = 0; pl[i].i != 0; i++); pl = (vrna_ep_t *)vrna_realloc(pl, (i + count + 1) * sizeof(vrna_ep_t)); for (j = 0; j < count; j++) { pl[i + j].i = temp[j].i; pl[i + j].j = temp[j].j; pl[i + j].p = temp[j].p; } pl[i + count].i = 0; pl[i + count].j = 0; pl[i + count].p = 0; free(temp); return pl; } #endif PRIVATE FLT_OR_DBL * compute_stack_probabilities(vrna_fold_compound_t *vc, int start) { /* compute dependent pair probabilities */ char **ptype; short *S1; int j, max_j, *rtype, turn, pairsize, length, type, type_2; FLT_OR_DBL **qb, *scale, *probs; double tmp; vrna_exp_param_t *pf_params; length = vc->length; S1 = vc->sequence_encoding; pf_params = vc->exp_params; ptype = vc->ptype_local; qb = vc->exp_matrices->qb_local; scale = vc->exp_matrices->scale; rtype = &(pf_params->model_details.rtype[0]); turn = pf_params->model_details.min_loop_size; pairsize = pf_params->model_details.max_bp_span; max_j = MIN2(start + pairsize, length) - 1; probs = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (max_j - start + 1)); for (j = 
start + turn + 1; j <= max_j; j++) { if ((qb[start][j] * qb[start - 1][(j + 1)]) > 10e-200) { type = vrna_get_ptype_window(start - 1, j + 1 + start - 1, ptype); type_2 = rtype[vrna_get_ptype_window(start, j + start, ptype)]; tmp = qb[start][j] / qb[start - 1][(j + 1)] * exp_E_IntLoop(0, 0, type, type_2, S1[start], S1[j], S1[start - 1], S1[j + 1], pf_params) * scale[2]; probs[j - start - 1] = tmp; } } return probs; } /* * Here: Space for questions... */ PRIVATE void compute_pU(vrna_fold_compound_t *vc, int k, int ulength, helper_arrays *aux_arrays, vrna_probs_window_callback *cb, void *data, unsigned int options) { /* * here, we try to add a function computing all unpaired probabilities starting at some i, * going down to $unpaired, to be unpaired, i.e. a list with entries from 1 to unpaired for * every i, with the probability of a stretch of length x, starting at i-x+1, to be unpaired */ char **ptype; short *S1; int startu, i5, j3, len, obp, *rtype, turn, winSize, n, leftmost, rightmost, tt; FLT_OR_DBL expMLclosing, *expMLbase, **q, **qm, **qm2, *scale, **pR, **QI5, **q2l, **qmb; double qqq, temp, *QBE, *QBI, *QBM, *QBH, **pU, **pUO, **pUH, **pUI, **pUM; vrna_exp_param_t *pf_params; vrna_hc_t *hc; vrna_sc_t *sc; n = vc->length; winSize = vc->window_size; S1 = vc->sequence_encoding; pf_params = vc->exp_params; ptype = vc->ptype_local; rtype = &(pf_params->model_details.rtype[0]); scale = vc->exp_matrices->scale; q = vc->exp_matrices->q_local; qm = vc->exp_matrices->qm_local; qm2 = vc->exp_matrices->qm2_local; expMLbase = vc->exp_matrices->expMLbase; expMLclosing = pf_params->expMLclosing; pR = vc->exp_matrices->pR; QI5 = vc->exp_matrices->QI5; q2l = vc->exp_matrices->q2l; qmb = vc->exp_matrices->qmb; turn = pf_params->model_details.min_loop_size; hc = vc->hc; sc = vc->sc; pU = aux_arrays->pU; pUO = aux_arrays->pUO; pUH = aux_arrays->pUH; pUI = aux_arrays->pUI; pUM = aux_arrays->pUM; QBE = (double *)vrna_alloc((MAX2(ulength, MAXLOOP) + 2) * sizeof(double)); QBM = 
(double *)vrna_alloc((MAX2(ulength, MAXLOOP) + 2) * sizeof(double)); QBI = (double *)vrna_alloc((MAX2(ulength, MAXLOOP) + 2) * sizeof(double)); QBH = (double *)vrna_alloc((MAX2(ulength, MAXLOOP) + 2) * sizeof(double)); /* * first, we will * for k<=ulength, pU[k][k]=0, because no bp can enclose it */ /* compute pu[k+ulength][ulength] */ for (i5 = MAX2(k + ulength - winSize + 1, 1); i5 <= k; i5++) { for (j3 = k + ulength + 1; j3 <= MIN2(n, i5 + winSize - 1); j3++) { /* Multiloops */ if (hc->matrix_local[i5][j3 - i5] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) { tt = rtype[vrna_get_ptype_window(i5, j3 + i5, ptype)]; temp = 0.; /* * (.. >-----|..........) * i5 j j+ulength j3 */ /* (..{}{}-----|......) */ if ((hc->up_ml[k + 1] >= j3 - k - 1) && (i5 < k)) { qqq = qm2[i5 + 1][k] * expMLbase[j3 - k - 1]; if (sc) { if (sc->exp_energy_up) qqq *= sc->exp_energy_up[k + 1][j3 - k - 1]; if (sc->f) qqq *= sc->f(i5, j3, i5 + 1, k, VRNA_DECOMP_PAIR_ML, sc->data); } temp += qqq; } /* (..|-----|{}{}) */ if ((hc->up_ml[i5 + 1] >= k + ulength - i5) && (j3 - 1 > k + ulength)) { qqq = qm2[k + ulength + 1][j3 - 1] * expMLbase[k + ulength - i5]; if (sc) { if (sc->exp_energy_up) qqq *= sc->exp_energy_up[i5 + 1][k + ulength - i5]; if (sc->f) qqq *= sc->f(i5, j3, k + ulength + 1, j3, VRNA_DECOMP_PAIR_ML, sc->data); } temp += qqq; } /* ({}|-----|{}) */ if ((hc->up_ml[k + 1] >= ulength) && (i5 < k) && (j3 - 1 > k + ulength)) { qqq = qm[i5 + 1][k] * qm[k + ulength + 1][j3 - 1] * expMLbase[ulength]; if (sc) { if (sc->exp_energy_up) qqq *= sc->exp_energy_up[k + 1][ulength]; if (sc->f) qqq *= sc->f(i5, j3, k, k + ulength + 1, VRNA_DECOMP_PAIR_ML_OUTSIDE, sc->data); } temp += qqq; } /* add dangles, multloopclosing etc. 
*/ qqq = exp_E_MLstem(tt, S1[j3 - 1], S1[i5 + 1], pf_params) * scale[2] * expMLclosing; if (sc) if (sc->exp_energy_bp_local) qqq *= sc->exp_energy_bp_local[i5][j3 - i5]; temp *= qqq; pU[k + ulength][ulength] += temp * pR[i5][j3]; if (options & VRNA_PROBS_WINDOW_UP_SPLIT) pUM[k + ulength][ulength] += temp * pR[i5][j3]; } /* add hairpins */ if (hc->matrix_local[i5][j3 - i5] & VRNA_CONSTRAINT_CONTEXT_HP_LOOP) { temp = vrna_exp_E_hp_loop(vc, i5, j3); pU[k + ulength][ulength] += temp * pR[i5][j3]; if (options & VRNA_PROBS_WINDOW_UP_SPLIT) pUH[k + ulength][ulength] += temp * pR[i5][j3]; } } } /* Add Interior loop contribution to QBE (and QBI) */ temp = 0.; for (len = winSize; len > MAX2(ulength, MAXLOOP); len--) temp += QI5[k][len]; for (; len > 0; len--) { temp += QI5[k][len]; QBI[len] += temp; QBE[len] += temp; } /* Add Hairpin loop contribution to QBE (and QBH) */ temp = 0.; for (obp = MIN2(n, k + winSize - 1); obp > k + ulength; obp--) temp += pR[k][obp] * vrna_exp_E_hp_loop(vc, k, obp); for (obp = MIN2(n, MIN2(k + winSize - 1, k + ulength)); obp > k + 1; obp--) { temp += pR[k][obp] * vrna_exp_E_hp_loop(vc, k, obp); QBH[obp - k - 1] += temp; QBE[obp - k - 1] += temp; } /* * Add up Multiloopterms qmb[l][m]+=prml[m]*dang; * q2l[l][m]+=(prml[m]-prm_l[m])*dang; */ temp = 0.; /* add (()()____) type cont. 
to I3 */ if (sc && sc->exp_energy_up) { for (len = winSize; len >= ulength; len--) if (hc->up_ml[k + 1] >= len) { temp += q2l[k][len] * expMLbase[len] * sc->exp_energy_up[k + 1][len]; } for (; len > 0; len--) { if (hc->up_ml[k + 1] >= len) { temp += q2l[k][len] * expMLbase[len] * sc->exp_energy_up[k + 1][len]; } QBM[len] += temp; QBE[len] += temp; } } else { for (len = winSize; len >= ulength; len--) if (hc->up_ml[k + 1] >= len) temp += q2l[k][len] * expMLbase[len]; for (; len > 0; len--) { if (hc->up_ml[k + 1] >= len) temp += q2l[k][len] * expMLbase[len]; QBM[len] += temp; QBE[len] += temp; } } /* add (()___()) */ for (len = 1; len < ulength; len++) { if (hc->up_ml[k + 1] >= len) { for (obp = k + len + turn; obp <= MIN2(n, k + winSize - 1); obp++) { temp = qmb[k][obp - k - 1] * qm[k + len + 1 /*2*/][obp - 1] * expMLbase[len]; if (sc) if (sc->exp_energy_up) temp *= sc->exp_energy_up[k + 1][len]; QBM[len] += temp; QBE[len] += temp; } } } /* add (___()()) */ for (len = 1; len < ulength; len++) { if (hc->up_ml[k + 1] >= len) { for (obp = k + len + turn + turn; obp <= MIN2(n, k + winSize - 1); obp++) { if (hc->matrix_local[k][obp - k] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) { tt = rtype[vrna_get_ptype_window(k, obp + k, ptype)]; temp = exp_E_MLstem(tt, S1[obp - 1], S1[k + 1], pf_params) * scale[2] * expMLbase[len] * expMLclosing * pR[k][obp] * qm2[k + len + 1][obp - 1]; /* k:obp */ if (sc) { if (sc->exp_energy_up) temp *= sc->exp_energy_up[k + 1][len]; if (sc->exp_energy_bp) temp *= sc->exp_energy_bp_local[k][obp - k]; } QBM[len] += temp; QBE[len] += temp; } } } } /* * After computing all these contributions in QBE[len], that k is paired * and the unpaired stretch is AT LEAST len long, we start to add that to * the old unpaired thingies; */ for (len = 1; len <= MIN2(MAX2(ulength, MAXLOOP), n - k); len++) pU[k + len][len] += pU[k + len][len + 1] + QBE[len]; if (options & VRNA_PROBS_WINDOW_UP_SPLIT) { for (len = 1; len <= MIN2(MAX2(ulength, MAXLOOP), n - k); len++) { pUH[k + 
len][len] += pUH[k + len][len + 1] + QBH[len]; pUM[k + len][len] += pUM[k + len][len + 1] + QBM[len]; pUI[k + len][len] += pUI[k + len][len + 1] + QBI[len]; } /* open chain */ if ((ulength >= winSize) && (k >= ulength) && (hc->up_ext[k - winSize + 1] >= winSize)) pUO[k][winSize] = scale[winSize] / q[k - winSize + 1][k]; } /* open chain */ if ((ulength >= winSize) && (k >= ulength) && (hc->up_ext[k - winSize + 1] >= winSize)) { if (sc && sc->exp_energy_up) { pU[k][winSize] = scale[winSize] * sc->exp_energy_up[k][winSize] / q[k - winSize + 1][k]; } else { pU[k][winSize] = scale[winSize] / q[k - winSize + 1][k]; } } /* * now the not enclosed by any base pair terms for whatever it is we do not need anymore... * ... which should be e.g; k, again */ for (startu = MIN2(ulength, k); startu > 0; startu--) { temp = 0.; /* check whether soft constraint unpaired contributions available */ if (sc && sc->exp_energy_up) { if (hc->up_ext[k - startu + 1] >= startu) { for (i5 = MAX2(1, k - winSize + 2); i5 <= MIN2(k - startu, n - winSize + 1); i5++) temp += q[i5][k - startu] * q[k + 1][i5 + winSize - 1] * scale[startu] * sc->exp_energy_up[k - startu + 1][startu] / q[i5][i5 + winSize - 1]; /* the 2 Cases where the borders are on the edge of the interval */ if ((k >= winSize) && (startu + 1 <= winSize)) { temp += q[k - winSize + 1][k - startu] * scale[startu] * sc->exp_energy_up[k - startu + 1][startu] / q[k - winSize + 1][k]; } if ((k <= n - winSize + startu) && (k - startu >= 0) && (k < n) && (startu + 1 <= winSize)) { temp += q[k + 1][k - startu + winSize] * scale[startu] * sc->exp_energy_up[k - startu + 1][startu] / q[k - startu + 1][k - startu + winSize]; } } } else { if (hc->up_ext[k - startu + 1] >= startu) { for (i5 = MAX2(1, k - winSize + 2); i5 <= MIN2(k - startu, n - winSize + 1); i5++) temp += q[i5][k - startu] * q[k + 1][i5 + winSize - 1] * scale[startu] / q[i5][i5 + winSize - 1]; /* the 2 Cases where the borders are on the edge of the interval */ if ((k >= winSize) && 
(startu + 1 <= winSize)) temp += q[k - winSize + 1][k - startu] * scale[startu] / q[k - winSize + 1][k]; if ((k <= n - winSize + startu) && (k - startu >= 0) && (k < n) && (startu + 1 <= winSize)) temp += q[k + 1][k - startu + winSize] * scale[startu] / q[k - startu + 1][k - startu + winSize]; } } /* Divide by number of possible windows */ leftmost = MAX2(1, k - winSize + 1); rightmost = MIN2(n - winSize + 1, k - startu + 1); pU[k][startu] += temp; pU[k][startu] /= (rightmost - leftmost + 1); if (options & VRNA_PROBS_WINDOW_UP_SPLIT) { pUO[k][startu] += temp; /* Do we want to make a distinction between those? */ pUO[k][startu] /= (rightmost - leftmost + 1); pUH[k][startu] /= (rightmost - leftmost + 1); pUI[k][startu] /= (rightmost - leftmost + 1); pUM[k][startu] /= (rightmost - leftmost + 1); } } free(QBE); free(QBI); free(QBH); free(QBM); /* call return callback */ return_pU(MIN2(ulength, k), k, ulength, aux_arrays, cb, data, options); return; } PRIVATE void print_bpp_callback(FLT_OR_DBL *pr, int size, int k, void *data) { int j; FILE *fp = ((default_cb_data *)data)->fp_bpp; FLT_OR_DBL cutoff = ((default_cb_data *)data)->bpp_cutoff; for (j = k + 1; j <= size; j++) { if (pr[j] < cutoff) continue; fprintf(fp, "%d %d %g\n", k, j, pr[j]); } } PRIVATE void store_bpp_callback(FLT_OR_DBL *pr, int size, int k, void *data) { int j; vrna_ep_t *pl = ((default_cb_data *)data)->bpp; unsigned int pl_size = ((default_cb_data *)data)->bpp_size; unsigned int pl_max_size = ((default_cb_data *)data)->bpp_max_size; FLT_OR_DBL cutoff = ((default_cb_data *)data)->bpp_cutoff; if (pl_max_size == 0) { /* init if necessary */ pl_max_size = 100; pl = (vrna_ep_t *)vrna_realloc(pl, sizeof(vrna_ep_t) * pl_max_size); } for (j = k + 1; j <= size; j++) { if (pr[j] < cutoff) continue; /* resize vrna_ep_t memory if necessary */ if (pl_size >= pl_max_size - 1) { pl_max_size *= 1.5; pl = (vrna_ep_t *)vrna_realloc(pl, sizeof(vrna_ep_t) * pl_max_size); } pl[pl_size].i = k; pl[pl_size].j = j; 
pl[pl_size].type = VRNA_PLIST_TYPE_BASEPAIR; pl[pl_size++].p = pr[j]; } /* mark end of vrna_ep_t */ pl[pl_size].i = 0; pl[pl_size].j = 0; pl[pl_size].type = VRNA_PLIST_TYPE_BASEPAIR; pl[pl_size].p = 0.; /* update data */ ((default_cb_data *)data)->bpp = pl; ((default_cb_data *)data)->bpp_size = pl_size; ((default_cb_data *)data)->bpp_max_size = pl_max_size; } #if 0 PRIVATE void store_stack_prob_callback(FLT_OR_DBL *pr, int size, int k, void *data) { int j; vrna_ep_t *pl = ((default_cb_data *)data)->stack_prob; unsigned int pl_size = ((default_cb_data *)data)->stack_prob_size; unsigned int pl_max_size = ((default_cb_data *)data)->stack_prob_max_size; FLT_OR_DBL cutoff = ((default_cb_data *)data)->bpp_cutoff; if (pl_max_size == 0) { /* init if necessary */ pl_max_size = 100; pl = (vrna_ep_t *)vrna_realloc(pl, sizeof(vrna_ep_t) * pl_max_size); } for (j = k + 1; j <= size; j++) { if (pr[j] < cutoff) continue; /* resize vrna_ep_t memory if necessary */ if (pl_size >= pl_max_size - 1) { pl_max_size *= 1.5; pl = (vrna_ep_t *)vrna_realloc(pl, sizeof(vrna_ep_t) * pl_max_size); } pl[pl_size].i = k; pl[pl_size].j = j; pl[pl_size].type = VRNA_PLIST_TYPE_BASEPAIR; pl[pl_size++].p = pr[j]; } /* mark end of vrna_ep_t */ pl[pl_size].i = 0; pl[pl_size].j = 0; pl[pl_size].type = VRNA_PLIST_TYPE_BASEPAIR; pl[pl_size].p = 0.; /* update data */ ((default_cb_data *)data)->stack_prob = pl; ((default_cb_data *)data)->stack_prob_size = pl_size; ((default_cb_data *)data)->stack_prob_max_size = pl_max_size; } #endif PRIVATE void print_pU_callback(double *pU, int size, int k, int ulength, unsigned int type, void *data) { if (type & VRNA_PROBS_WINDOW_UP) { int i; FILE *fp = ((default_cb_data *)data)->fp_pU; fprintf(fp, "%d\t", k); for (i = 1; i < size; i++) fprintf(fp, "%.7g\t", pU[i]); fprintf(fp, "%.7g", pU[size]); if ((type & VRNA_ANY_LOOP) == VRNA_ANY_LOOP) fprintf(fp, "\n"); else if (type & VRNA_EXT_LOOP) fprintf(fp, "\tE\n"); else if (type & VRNA_HP_LOOP) fprintf(fp, "\tH\n"); else if 
(type & VRNA_INT_LOOP) fprintf(fp, "\tI\n"); else if (type & VRNA_MB_LOOP) fprintf(fp, "\tM\n"); else vrna_message_warning("unknown loop type"); } } PRIVATE void store_pU_callback(double *pU, int size, int k, int ulength, unsigned int type, void *data) { int i; double **pU_storage = ((default_cb_data *)data)->pU; if ((type & VRNA_PROBS_WINDOW_UP) && ((type & VRNA_ANY_LOOP) == VRNA_ANY_LOOP)) { pU_storage[k] = (double *)vrna_alloc(sizeof(double) * (ulength + 1)); for (i = 1; i <= size; i++) pU_storage[k][i] = pU[i]; } } PRIVATE void backward_compat_callback(FLT_OR_DBL *pr, int pr_size, int i, int max, unsigned int type, void *data) { default_cb_data *d = (default_cb_data *)data; if (type & VRNA_PROBS_WINDOW_BPP) { if (d->bpp_print) print_bpp_callback(pr, pr_size, i, data); else store_bpp_callback(pr, pr_size, i, data); } else if (type & VRNA_PROBS_WINDOW_UP) { if (d->up_print) print_pU_callback(pr, pr_size, i, max, type, data); else store_pU_callback(pr, pr_size, i, max, type, data); } } #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY /* *########################################### *# deprecated functions below # *########################################### */ PRIVATE void putoutpU_prob_old(double **pU, int length, int ulength, FILE *fp, int energies, vrna_exp_param_t *parameters); PRIVATE void putoutpU_prob_bin_old(double **pU, int length, int ulength, FILE *fp, int energies, vrna_exp_param_t *parameters); PRIVATE vrna_ep_t * wrap_pf_foldLP(char *sequence, int winSize, int pairSize, float cutoffb, double **pU, vrna_ep_t **dpp2, FILE *pUfp, FILE *spup, vrna_exp_param_t *parameters) { int ulength, r; vrna_fold_compound_t *vc; vrna_md_t md; default_cb_data data; vc = NULL; ulength = 0; /* * if present, extract model details from provided parameters variable, * to properly initialize the fold compound. 
Otherwise use default * settings taken from deprecated global variables */ if (parameters) vrna_md_copy(&md, &(parameters->model_details)); else set_model_details(&md); md.compute_bpp = 1; /* turn on base pair probability computations */ md.window_size = winSize; /* set size of sliding window */ md.max_bp_span = pairSize; /* set maximum base pair span */ vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT | VRNA_OPTION_WINDOW); /* * if present, attach a copy of the parameters structure instead of the * default parameters but take care of re-setting it to (initialized) * model details */ free(vc->exp_params); if (parameters) { vrna_md_copy(&(parameters->model_details), &(vc->params->model_details)); vc->exp_params = vrna_exp_params_copy(parameters); } else { vc->exp_params = vrna_exp_params(&(vc->params->model_details)); } /* propagate global pf_scale into vc->exp_params */ vc->exp_params->pf_scale = pf_scale; if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = vc; backward_compat = 1; iindx = backward_compat_compound->iindx; /* for backward compatibility and Perl wrapper */ if (pU) ulength = (int)pU[0][0] + 0.49; data.fp_pU = pUfp; data.pU = pU; data.bpp_cutoff = (FLT_OR_DBL)cutoffb; data.fp_bpp = spup; data.bpp = NULL; data.bpp_max_size = 0; data.bpp_size = 0; data.stack_prob = NULL; data.stack_prob_max_size = 0; data.stack_prob_size = 0; data.bpp_print = (spup) ? 1 : 0; data.up_print = (pUfp) ? 
1 : 0; unsigned int options = VRNA_PROBS_WINDOW_BPP; /* always compute base pair probabilities */ if (dpp2 && (*dpp2)) options |= VRNA_PROBS_WINDOW_STACKP; if (ulength > 0) options |= VRNA_PROBS_WINDOW_UP; r = vrna_probs_window(vc, ulength, options, &backward_compat_callback, (void *)&data); if (!r) return NULL; if (dpp2 && (*dpp2)) { data.stack_prob = (vrna_ep_t *)vrna_realloc(data.stack_prob, sizeof(vrna_ep_t) * (data.stack_prob_size + 1)); data.stack_prob[data.stack_prob_size].i = 0; data.stack_prob[data.stack_prob_size].j = 0; data.stack_prob[data.stack_prob_size].type = VRNA_PLIST_TYPE_BASEPAIR; data.stack_prob[data.stack_prob_size].p = 0; free(*dpp2); /* free already occupied memory */ *dpp2 = data.stack_prob; } if (!spup) { data.bpp = (vrna_ep_t *)vrna_realloc(data.bpp, sizeof(vrna_ep_t) * (data.bpp_size + 1)); data.bpp[data.bpp_size].i = 0; data.bpp[data.bpp_size].j = 0; data.bpp[data.bpp_size].type = VRNA_PLIST_TYPE_BASEPAIR; data.bpp[data.bpp_size].p = 0; return data.bpp; } else { return NULL; } } PUBLIC void init_pf_foldLP(int length) { /* DO NOTHING */ } PUBLIC void update_pf_paramsLP(int length) { if (backward_compat_compound && backward_compat) { vrna_md_t md; set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC void update_pf_paramsLP_par(int length, vrna_exp_param_t *parameters) { if (backward_compat_compound && backward_compat) { vrna_md_t md; if (parameters) { vrna_exp_params_subst(backward_compat_compound, parameters); } else { set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); } /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC vrna_ep_t * pfl_fold(char *sequence, int winSize, int pairSize, float cutoffb, double **pU, vrna_ep_t **dpp2, FILE *pUfp, FILE *spup) { return wrap_pf_foldLP(sequence, 
winSize, pairSize, cutoffb, pU, dpp2, pUfp, spup, NULL); } PUBLIC vrna_ep_t * pfl_fold_par(char *sequence, int winSize, int pairSize, float cutoffb, double **pU, vrna_ep_t **dpp2, FILE *pUfp, FILE *spup, vrna_exp_param_t *parameters) { return wrap_pf_foldLP(sequence, winSize, pairSize, cutoffb, pU, dpp2, pUfp, spup, parameters); } PUBLIC void putoutpU_prob(double **pU, int length, int ulength, FILE *fp, int energies) { if (backward_compat_compound && backward_compat) putoutpU_prob_old(pU, length, ulength, fp, energies, backward_compat_compound->exp_params); else vrna_message_warning("putoutpU_prob: Not doing anything! First, run pfl_fold()!"); } PUBLIC void putoutpU_prob_par(double **pU, int length, int ulength, FILE *fp, int energies, vrna_exp_param_t *parameters) { if ((pU) && (fp) && (parameters)) putoutpU_prob_old(pU, length, ulength, fp, energies, parameters); } PRIVATE void putoutpU_prob_old(double **pU, int length, int ulength, FILE *fp, int energies, vrna_exp_param_t *parameters) { /* put out unpaireds */ int i, k; double temp, kT = parameters->kT / 1000.0; if (energies) fprintf(fp, "#opening energies\n #i$\tl="); else fprintf(fp, "#unpaired probabilities\n #i$\tl="); for (i = 1; i <= ulength; i++) fprintf(fp, "%d\t", i); fprintf(fp, "\n"); for (k = 1; k <= length; k++) { fprintf(fp, "%d\t", k); for (i = 1; i <= ulength; i++) { if (i > k) { fprintf(fp, "NA\t"); continue; } if (energies) temp = -log(pU[k][i]) * kT; else temp = pU[k][i]; fprintf(fp, "%.7g\t", temp); } fprintf(fp, "\n"); free(pU[k]); } fflush(fp); } PUBLIC void putoutpU_prob_bin(double **pU, int length, int ulength, FILE *fp, int energies) { if (backward_compat_compound && backward_compat) putoutpU_prob_bin_old(pU, length, ulength, fp, energies, backward_compat_compound->exp_params); else vrna_message_warning("putoutpU_prob_bin: Not doing anything! 
First, run pfl_fold()!"); } PUBLIC void putoutpU_prob_bin_par(double **pU, int length, int ulength, FILE *fp, int energies, vrna_exp_param_t *parameters) { if ((pU) && (fp) && (parameters)) putoutpU_prob_bin_old(pU, length, ulength, fp, energies, parameters); } PRIVATE void putoutpU_prob_bin_old(double **pU, int length, int ulength, FILE *fp, int energies, vrna_exp_param_t *parameters) { /* put out unpaireds */ int i, k, *p; double kT = parameters->kT / 1000.0; p = (int *)vrna_alloc(sizeof(int) * 1); /* write first line */ p[0] = ulength; /* u length */ fwrite(p, sizeof(int), 1, fp); p[0] = length; /* seq length */ fwrite(p, sizeof(int), 1, fp); for (k = 3; k <= (length + 20); k++) { /* all the other lines are set to 1000000 because we are at ulength=0 */ p[0] = 1000000; fwrite(p, sizeof(int), 1, fp); } /* data */ for (i = 1; i <= ulength; i++) { for (k = 1; k <= 11; k++) { /* write first ten entries to 1000000 */ p[0] = 1000000; fwrite(p, sizeof(int), 1, fp); } for (k = 1; k <= length; k++) { /* write data now */ if (i > k) { p[0] = 1000000; /* check if u > pos */ fwrite(p, sizeof(int), 1, fp); continue; } else { p[0] = (int)rint(100 * (-log(pU[k][i]) * kT)); fwrite(p, sizeof(int), 1, fp); } } for (k = 1; k <= 9; k++) { /* finish by writing the last 10 entries */ p[0] = 1000000; fwrite(p, sizeof(int), 1, fp); } } /* free pU array; */ for (k = 1; k <= length; k++) free(pU[k]); free(p); fflush(fp); } #endif
collisions_sweepphi.c
/** * @file collisions.c * @brief Collision search using a line sweep algorithm, O(N log(N)). * @author Hanno Rein <hanno@hanno-rein.de> * * @details The routines in this file implement a collision detection * method called line sweep. It is very fast if all dimensions except one * are small. The algorithm is similar to the original algorithm proposed * by Bentley & Ottmann (1979) but does not maintain a binary search tree. * This is much faster as long as the number of particle trajectories * currently intersecting the plane is small. * * The sweeping direction in this implementation is phi. This can be used * for narrow rings, such as in the example 'spreading_ring'. * * * @section LICENSE * Copyright (c) 2011 Hanno Rein, Shangfei Liu * * This file is part of rebound. * * rebound is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * rebound is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with rebound. If not, see <http://www.gnu.org/licenses/>. * */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <time.h> #include "particle.h" #include "collisions.h" #include "collision_resolve.h" #include "rebound.h" #include "tree.h" #include "boundaries.h" #ifdef OPENMP #include <omp.h> #endif double collisions_max_r = 0; double collisions_max2_r = 0; int sweeps_proc = 1; /**< Number of processors used for seeping algorithm. */ int sweeps_init_done = 0; /**< Used for initialisation of data structures. 
*/ int N_collisions = 0; // static inline double min(double a, double b){ return (a>b)?b:a;} // static inline double max(double a, double b){ return (b>a)?b:a;} static inline double sgn(const double a){ return (a>=0 ? 1. : -1); } /** * This function checks if two particles colliding during one drift step. * @param pt1 reb_particle 1. * @param pt2 reb_particle 2. * @param proci Processor id (OpenMP) for this collision. * @param crossing Flag that is one if one of the particles crosses a boundary in this timestep. */ void detect_collision_of_pair(int pt1, int pt2, int proci, int crossing); /** * Structure that stores a start or end point of a particle trajectory. */ struct phivalue { double phi; // position along sweep axis int inout; // start or endpoint int nphi; int crossing; // crosses boundary int pt; // particle }; /** * Structure that contains a list of xvalues. */ struct phivaluelist { struct phivalue* phivalues; int N; /**< Current array size. */ int Nmax; /**< Maximum array size before realloc() is needed. */ }; struct phivaluelist* sweepphi; /**< Pointers to the SWEEPY list of each processor. */ /** * Structure that contains a list of collisions. */ struct reb_collisionlist { struct reb_collision* collisions; int N; /**< Current array size. */ int Nmax; /**< Maximum array size before realloc() is needed. */ }; struct collisionlist* clist; /**< Pointers to the collisions list of each processor. */ /** * Adds a line to the SWEEPY array of processor proci. 
*/
/* Appends one trajectory segment [phi1,phi2] for particle pt to processor
 * proci's sweep list, growing the array geometrically (in 1024-entry steps)
 * when needed. Two phivalue records are written: a start (inout=0) and an
 * end (inout=1) event sharing the same pt/nphi/crossing flags. */
void add_line_to_phivsublist(double phi1, double phi2, int pt, int n, int proci, int crossing){
    int N = sweepphi[proci].N;
    if (N+2>sweepphi[proci].Nmax){
        sweepphi[proci].Nmax += 1024;
        sweepphi[proci].phivalues = (struct phivalue*)realloc(sweepphi[proci].phivalues,sweepphi[proci].Nmax*sizeof(struct phivalue));
    }
    /* Start event. */
    sweepphi[proci].phivalues[N].phi = phi1;
    sweepphi[proci].phivalues[N].pt = pt;
    sweepphi[proci].phivalues[N].nphi = n;
    sweepphi[proci].phivalues[N].inout = 0;
    sweepphi[proci].phivalues[N].crossing = crossing;
    /* End event. */
    sweepphi[proci].phivalues[N+1].phi = phi2;
    sweepphi[proci].phivalues[N+1].pt = pt;
    sweepphi[proci].phivalues[N+1].nphi = n;
    sweepphi[proci].phivalues[N+1].inout = 1;
    sweepphi[proci].phivalues[N+1].crossing = crossing;
    sweepphi[proci].N += 2;
}

/**
 * Adds a line to the sweep array and checks for crossings of processor boundaries.
 * The [-pi,pi) phi range is divided evenly among sweeps_proc processors; a segment
 * spanning a processor boundary is split at that boundary and both halves are
 * flagged as crossing.
 */
void add_line_to_phivlist(double phi1, double phi2, int pt, int n, int crossing){
    /* Map phi in [-pi,pi) to a processor index in [0, sweeps_proc). */
    int prociphi1 = (int)(floor( (phi1/(2.*M_PI)+0.5) *(double)sweeps_proc));// %sweeps.phivlists;
    int prociphi2 = (int)(floor( (phi2/(2.*M_PI)+0.5) *(double)sweeps_proc));// %sweeps.phivlists;
    /* Clamp rounding spill-over at the domain edges. */
    if (prociphi2>=sweeps_proc){
        prociphi2 = sweeps_proc-1;
    }
    if (prociphi1<0){
        prociphi1 = 0;
    }
    if (prociphi1!=prociphi2){
        /* Segment spans a processor boundary: split at the boundary b.
         * NOTE(review): a segment spanning more than two processor domains
         * is not subdivided further — presumably segments are assumed short
         * relative to a domain; confirm upstream. */
        double b = -M_PI+2.*M_PI/(double)sweeps_proc*(double)prociphi2;
        add_line_to_phivsublist(phi1,b,pt,n,prociphi1,1);
        add_line_to_phivsublist(b,phi2,pt,n,prociphi2,1);
    }else{
        add_line_to_phivsublist(phi1,phi2,pt,n,prociphi1,crossing);
    }
}

/**
 * Adds a line to the sweep array and checks for crossings of simulation boundaries.
 * The segment is inflated by the particle radius; if it then sticks out of
 * [-pi,pi] it is wrapped around by 2*pi and inserted as two segments whose
 * nphi flag (+1/-1) records the wrap direction.
 */
void add_to_phivlist(double phi1, double phi2, int pt){
    double phimin, phimax;
    if (phi1 < phi2){
        phimin = phi1;
        phimax = phi2;
    }else{
        phimin = phi2;
        phimax = phi1;
    }
    double radius = particles[pt].r*1.0001; //Safety factor to avoid floating point issues.
    phimin -= radius;
    phimax += radius;
    if (phimin<-M_PI){
        /* Wraps below -pi: insert the wrapped-around upper piece (nphi=+1)
         * and the in-range piece. */
        add_line_to_phivlist(phimin+2.*M_PI,M_PI,pt,1,1);
        add_line_to_phivlist(-M_PI,phimax,pt,0,1);
        return;
    }
    if (phimax>M_PI){
        /* Wraps above pi: mirror case (nphi=-1). */
        add_line_to_phivlist(-M_PI,phimax-2.*M_PI,pt,-1,1);
        add_line_to_phivlist(phimin,M_PI,pt,0,1);
        return;
    }
    add_line_to_phivlist(phimin,phimax,pt,0,0);
}

/**
 * Compares the phi position of two phivalues (qsort comparator).
 */
int compare_phivalue (const void * a, const void * b){
    const double diff = ((struct phivalue*)a)->phi - ((struct phivalue*)b)->phi;
    if (diff > 0) return 1;
    if (diff < 0) return -1;
    return 0;
}

/**
 * Compares the phi position of two particles (qsort comparator).
 */
int compare_particle (const void * a, const void * b){
    const double diff = atan2(((struct reb_particle*)a)->y,((struct reb_particle*)a)->x) - atan2(((struct reb_particle*)b)->y,((struct reb_particle*)b)->x);
    if (diff > 0) return 1;
    if (diff < 0) return -1;
    return 0;
}

/**
 * Sorts the array phivl with insertion sort.
 * Chosen because the list stays nearly sorted between calls, where insertion
 * sort is O(N).
 */
void collisions_sweep_insertionsort_phivaluelist(struct phivaluelist* phivl){
    struct phivalue* phiv = phivl->phivalues;
    int _N = phivl->N;
    for(int j=1;j<_N;j++){
        struct phivalue key = phiv[j];
        int i = j - 1;
        while(i >= 0 && phiv[i].phi > key.phi){
            phiv[i+1] = phiv[i];
            i--;
        }
        phiv[i+1] = key;
    }
}

/**
 * Sorts the particle array with insertion sort.
*/
/* Insertion sort of particles[N_collisions..N-1] by phi = atan2(y,x).
 * NOTE(review): this rearranges the particle array itself, so any external
 * bookkeeping keyed on particle index is invalidated. */
void collisions_sweep_insertionsort_particles(void){
    for(int j=1+N_collisions;j<N;j++){
        struct reb_particle key = particles[j];
        double keyphi = atan2(particles[j].y,particles[j].x);
        int i = j - 1;
        while(i >= N_collisions && atan2(particles[i].y,particles[i].x) > keyphi){
            particles[i+1] = particles[i];
            i--;
        }
        particles[i+1] = key;
    }
}

/* Main entry point of the sweep: builds the per-processor event lists from
 * each particle's phi trajectory over one drift step, then sweeps each list
 * in parallel, testing every newly started segment against all segments
 * currently open. */
void reb_collision_search(void){
    if (sweeps_init_done!=1){
        /* One-time allocation of the per-processor lists. */
        sweeps_init_done = 1;
#ifdef OPENMP
        sweeps_proc = omp_get_max_threads();
#endif // OPENMP
        sweepphi = (struct phivaluelist*) calloc(sweeps_proc,sizeof(struct phivaluelist));
        clist = (struct reb_collisionlist*)calloc(sweeps_proc,sizeof(struct reb_collisionlist));
#ifndef TREE
        // Sort particles according to their phi position to speed up sorting of lines.
        // Initially the particles are not pre-sorted, thus qsort is faster than insertionsort.
        // Note that this rearranges particles and will cause problems if the particle id is used elsewhere.
        qsort (&(particles[N_collisions]), N-N_collisions, sizeof(struct reb_particle), compare_particle);
    }else{
        // Keep particles sorted according to their phi position to speed up sorting of lines.
        // NOTE(review): with TREE defined this whole else-branch is compiled
        // out, so particles are never re-sorted on later calls — confirm that
        // is intended.
        collisions_sweep_insertionsort_particles();
#endif //TREE
    }
    /* Build the event lists: one phi interval per particle, covering its
     * drift over +-dt/2 plus a safety margin from collisions_max_r. */
    for (int i=N_collisions;i<N;i++){
        double phi = atan2(particles[i].y,particles[i].x);
        if (phi != phi) continue;  /* skip NaN (particle at origin) */
        double r = sqrt(particles[i].x*particles[i].x + particles[i].y*particles[i].y);
        double w = (particles[i].x*particles[i].vy - particles[i].y*particles[i].vx) / r;  /* angular velocity */
        if (w != w) continue;      /* skip NaN */
        double oldphi = phi-0.5*dt*w-collisions_max_r/r*2.*M_PI;
        double newphi = phi+0.5*dt*w+collisions_max_r/r*2.*M_PI;
        add_to_phivlist(oldphi,newphi,i);
    }
#pragma omp parallel for schedule (static,1)
    for (int proci=0;proci<sweeps_proc;proci++){
        struct phivaluelist* sweepphii = &(sweepphi[proci]);
#ifdef TREE
        // Use quicksort when there is a tree. reb_particles are not pre-sorted.
        qsort (sweepphii->phivalues, sweepphii->N, sizeof(struct phivalue), compare_phivalue);
#else //TREE
        // Use insertionsort when there is no tree. reb_particles are pre-sorted,
        // so the event list is nearly sorted already.
        collisions_sweep_insertionsort_phivaluelist(sweepphii);
#endif //TREE

        // SWEEPL: List of lines intersecting the plane.
        struct phivaluelist sweepl = {NULL,0,0};
        for (int i=0;i<sweepphii->N;i++){
            struct phivalue phiv = sweepphii->phivalues[i];
            if (phiv.inout == 0){
                // Add event if start of line
                if (sweepl.N>=sweepl.Nmax){
                    sweepl.Nmax +=32;
                    sweepl.phivalues = realloc(sweepl.phivalues,sizeof(struct phivalue)*sweepl.Nmax);
                }
                sweepl.phivalues[sweepl.N] = phiv;
                // Check for collisions with other particles in SWEEPL
                for (int k=0;k<sweepl.N;k++){
                    int p1 = phiv.pt;
                    int p2 = sweepl.phivalues[k].pt;
                    if (p1==p2) continue;  /* same particle via a wrapped segment */
                    int gbnphi = phiv.nphi;
                    if (sweepl.phivalues[k].nphi!=0){
                        /* Both segments wrapped the same way: they came from
                         * the same physical neighbourhood, skip duplicates. */
                        if (sweepl.phivalues[k].nphi==phiv.nphi) continue;
                        int tmp = p1;
                        p1 = p2;
                        p2 = tmp;
                        gbnphi = sweepl.phivalues[k].nphi;
                    }
                    /* NOTE(review): gbnphi is computed but never used — it is
                     * not passed to detect_collision_of_pair. Looks like a
                     * leftover from a ghost-box variant; confirm. */
                    detect_collision_of_pair(p1,p2,proci,sweepl.phivalues[k].crossing||phiv.crossing);
                }
                sweepl.N++;
            }else{
                // Remove event if end of line (swap-with-last removal).
                for (int j=0;j<sweepl.N;j++){
                    if (sweepl.phivalues[j].pt == phiv.pt){
                        sweepl.N--;
                        sweepl.phivalues[j] = sweepl.phivalues[sweepl.N];
                        j--;
                        break;
                    }
                }
            }
        }
        free(sweepl.phivalues);
    }
}

/* Solves |x12 + v12*t| = r1+r2 for t in [-dt/2, dt/2] and records a
 * collision for processor proci if the particles touch (or already overlap
 * for the whole step). */
void detect_collision_of_pair(int pt1, int pt2, int proci, int crossing){
    struct reb_particle* p1 = &(particles[pt1]);
    struct reb_particle* p2 = &(particles[pt2]);
    /* Relative position and velocity. */
    double x = p1->x - p2->x;
    double y = p1->y - p2->y;
    double z = p1->z - p2->z;
    double vx = p1->vx - p2->vx;
    double vy = p1->vy - p2->vy;
    double vz = p1->vz - p2->vz;
    /* Quadratic a*t^2 + b*t + c = 0 for |separation| = sum of radii. */
    double a = vx*vx + vy*vy + vz*vz;
    double b = 2.*(vx*x + vy*y + vz*z);
    double rr = p1->r + p2->r;
    double c = -rr*rr + x*x + y*y + z*z;
    double root = b*b-4.*a*c;
    if (root>=0.){
        // Floating point optimized solution of a quadratic equation. Avoids cancelations.
        double q = -0.5*(b+sgn(b)*sqrt(root));
        double time1 = c/q;
        double time2 = q/a;
        if (time1>time2){
            double tmp = time2;
            time2=time1;
            time1=tmp;
        }
        /* Collide if first contact falls inside this step, or the pair is
         * overlapping for the entire step (time1 before, time2 after). */
        if ( (time1>-dt/2. && time1<dt/2.) || (time1<-dt/2. && time2>dt/2.) ){
            struct reb_collisionlist* clisti = &(clist[proci]);
            if (clisti->N>=clisti->Nmax){
                clisti->Nmax += 1024;
                clisti->collisions = (struct reb_collision*)realloc(clisti->collisions,clisti->Nmax*sizeof(struct reb_collision));
            }
            /* NOTE(review): this `c` shadows the quadratic coefficient above;
             * harmless here since the coefficient is no longer needed. */
            struct reb_collision* c = &(clisti->collisions[clisti->N]);
            c->p1 = pt1;
            c->p2 = pt2;
            if ( (time1>-dt/2. && time1<dt/2.)) {
                c->time = time1;
            }else{
                c->time = 0;  /* already overlapping: resolve at mid-step */
            }
            c->crossing = crossing;
            clisti->N++;
        }
    }
}

/* Resolves all recorded collisions: particles are drifted back to the
 * collision time, collision_resolve() is applied, then drifted forward
 * again. Boundary-crossing collisions are serialized with an OpenMP lock. */
void collisions_resolve(void){
#ifdef OPENMP
    omp_lock_t boundarylock;
    omp_init_lock(&boundarylock);
#endif //OPENMP
#pragma omp parallel for schedule (static,1)
    for (int proci=0;proci<sweeps_proc;proci++){
        struct reb_collision* c = clist[proci].collisions;
        int colN = clist[proci].N;
        // Randomize array to avoid ordering bias in the resolution sequence.
        // NOTE(review): rand() is called from inside an OpenMP parallel
        // region; rand() is not required to be thread-safe — confirm this
        // is acceptable here.
        for(int i=0; i<colN; i++){
            int j = rand()%colN;
            struct reb_collision ctemp = c[i];
            c[i]=c[j];
            c[j]=ctemp;
        }
        for(int i=0; i<colN; i++){
            struct reb_collision c1= c[i];
            c1.gb = boundaries_get_ghostbox(0,0,0);
            /* Drift both particles back to the collision time c1.time. */
            particles[c1.p1].x -= c1.time*particles[c1.p1].vx;
            particles[c1.p1].y -= c1.time*particles[c1.p1].vy;
            particles[c1.p1].z -= c1.time*particles[c1.p1].vz;
            particles[c1.p2].x -= c1.time*particles[c1.p2].vx;
            particles[c1.p2].y -= c1.time*particles[c1.p2].vy;
            particles[c1.p2].z -= c1.time*particles[c1.p2].vz;
#ifdef OPENMP
            if (c1.crossing){
                /* Boundary-crossing pairs may touch particles owned by
                 * another thread's domain: serialize them. */
                omp_set_lock(&boundarylock);
            }
#endif //OPENMP
            collision_resolve(c1);
#ifdef OPENMP
            if (c1.crossing){
                omp_unset_lock(&boundarylock);
            }
#endif //OPENMP
            /* Drift forward again with the (possibly updated) velocities. */
            particles[c1.p1].x += c1.time*particles[c1.p1].vx;
            particles[c1.p1].y += c1.time*particles[c1.p1].vy;
            particles[c1.p1].z += c1.time*particles[c1.p1].vz;
            particles[c1.p2].x += c1.time*particles[c1.p2].vx;
            particles[c1.p2].y += c1.time*particles[c1.p2].vy;
            particles[c1.p2].z += c1.time*particles[c1.p2].vz;
        }
        /* Reset both per-processor lists for the next timestep (capacity kept). */
        clist[proci].N = 0;
        sweepphi[proci].N = 0;
    }
#ifdef OPENMP
    omp_destroy_lock(&boundarylock);
#endif //OPENMP
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
barrier-2.c
/* { dg-do compile } */ void f1(void) { #pragma omp barrier a /* { dg-error "expected end of line" } */ } /* OpenMP 2.5, section 2.7.3: Note that because the barrier construct does not have a C language statement as part of its syntax, there are some restrictions on its placement within a program. The barrier directive may only be placed in the program at a position where ignoring or deleting the directive would result in a program with correct syntax. */ void f2(void) { label: #pragma omp barrier } /* { dg-error "label at end of compound statement" } */ void f3(_Bool p) { if (p) #pragma omp barrier /* { dg-error "compound statements" } */ }
utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file utils.h * \brief Basic utilility functions. */ #ifndef MXNET_COMMON_UTILS_H_ #define MXNET_COMMON_UTILS_H_ #include <dmlc/logging.h> #include <dmlc/omp.h> #include <nnvm/graph.h> #include <nnvm/node.h> #include <mxnet/imperative.h> #include <mxnet/engine.h> #include <mxnet/ndarray.h> #include <mxnet/storage.h> #include <mxnet/op_attr_types.h> #include <mxnet/graph_attr_types.h> #include <nnvm/graph_attr_types.h> #include <memory> #include <vector> #include <type_traits> #include <utility> #include <random> #include <string> #include <thread> #include <algorithm> #include <functional> #include <limits> #include "../operator/mxnet_op.h" #if MXNET_USE_MKLDNN == 1 #include "../operator/nn/mkldnn/mkldnn_base-inl.h" #endif #if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__) #include <windows.h> #else #include <unistd.h> #endif namespace mxnet { namespace common { #if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__) inline size_t current_process_id() { return ::GetCurrentProcessId(); } #else inline size_t current_process_id() { return getpid(); } #endif /*! 
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0 * and end with value equal with size of indices. */ struct csr_indptr_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr, const nnvm::dim_t end, const nnvm::dim_t idx_size) { if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] || (i == 0 && indptr[i] != 0) || (i == end - 1 && indptr[end] != idx_size)) *out = kCSRIndPtrErr; } }; /*! * \brief Indices should be non-negative, less than the number of columns * and in ascending order per row. */ struct csr_idx_check { template<typename DType, typename IType, typename RType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const RType* indptr, const nnvm::dim_t ncols) { for (RType j = indptr[i]; j < indptr[i+1]; j++) { if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) { *out = kCSRIdxErr; break; } } } }; /*! * \brief Indices of RSPNDArray should be non-negative, * less than the size of first dimension and in ascending order */ struct rsp_idx_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const nnvm::dim_t end, const nnvm::dim_t nrows) { if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows) *out = kRSPIdxErr; } }; template<typename xpu> void CheckFormatWrapper(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check); /*! * \brief Check the validity of CSRNDArray. * \param rctx Execution context. * \param input Input NDArray of CSRStorage. * \param err_cpu Error number on cpu. * \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. 
*/ template<typename xpu> void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray"; const mxnet::TShape shape = input.shape(); const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx); const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr); const mxnet::TShape storage_shape = input.storage_shape(); if ((shape.ndim() != 2) || (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) || (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kCSRShapeErr; }); return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), indptr_shape[0] - 1, idx_shape[0]); // no need to check indices if indices are empty if (idx_shape[0] != 0) { Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIdx).dptr<IType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]); } mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); }); } } /*! * \brief Check the validity of RowSparseNDArray. * \param rctx Execution context. * \param input Input NDArray of RowSparseStorage. * \param err_cpu Error number on cpu. 
* \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. */ template<typename xpu> void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray"; const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx); if (idx_shape[0] != input.storage_shape()[0]) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kRSPShapeErr; }); return; } if (idx_shape[0] == 0) { return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0], val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(), idx_shape[0] - 1, input.shape()[0]); mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); } } template<typename xpu> void CheckFormatImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { int stype = input.storage_type(); if (stype == kCSRStorage) { CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kRowSparseStorage) { CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kDefaultStorage) { // no-op for default storage } else { LOG(FATAL) << "Unknown storage type " << stype; } } /*! \brief Pick rows specified by user input index array from a row sparse ndarray * and save them in the output sparse ndarray. 
*/ template<typename xpu> void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s, const NDArray& input_nd, const TBlob& idx_data, const OpReqType req, NDArray* output_nd); /* \brief Casts tensor storage type to the new type. */ template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /*! \brief returns true if all storage types in `vstorage` are the same as target `stype`. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) { if (!vstorage.empty()) { for (const auto& i : vstorage) { if (i != stype) return false; } return true; } return false; } /*! \brief returns true if all storage types in `vstorage` are the same as target `stype1` * or `stype2'. Sets boolean if both found. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!vstorage.empty()) { uint8_t has = 0; for (const auto i : vstorage) { if (i == stype1) { has |= 1; } else if (i == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as target `stype`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as targets `stype1` or `stype2`. false is returned for empty inputs. 
*/ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!ndarrays.empty()) { uint8_t has = 0; for (const auto& nd : ndarrays) { const NDArrayStorageType stype = nd.storage_type(); if (stype == stype1) { has |= 1; } else if (stype == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if storage type of any array in `ndarrays` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() == stype) { return true; } } } return false; } /*! \brief returns true if any storage type `ndstype` in `ndstypes` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) { if (!ndstypes.empty()) { for (const auto& ndstype : ndstypes) { if (ndstype == stype) { return true; } } } return false; } /*! \brief get string representation of dispatch_mode */ inline std::string dispatch_mode_string(const DispatchMode x) { switch (x) { case DispatchMode::kFCompute: return "fcompute"; case DispatchMode::kFComputeEx: return "fcompute_ex"; case DispatchMode::kFComputeFallback: return "fcompute_fallback"; case DispatchMode::kVariable: return "variable"; case DispatchMode::kUndefined: return "undefined"; } return "unknown"; } /*! \brief get string representation of storage_type */ inline std::string stype_string(const int x) { switch (x) { case kDefaultStorage: return "default"; case kCSRStorage: return "csr"; case kRowSparseStorage: return "row_sparse"; } return "unknown"; } /*! 
\brief get string representation of device type */ inline std::string dev_type_string(const int dev_type) { switch (dev_type) { case Context::kCPU: return "cpu"; case Context::kGPU: return "gpu"; case Context::kCPUPinned: return "cpu_pinned"; case Context::kCPUShared: return "cpu_shared"; } return "unknown"; } inline std::string attr_value_string(const nnvm::NodeAttrs& attrs, const std::string& attr_name, std::string default_val = "") { if (attrs.dict.find(attr_name) == attrs.dict.end()) { return default_val; } return attrs.dict.at(attr_name); } /*! \brief get string representation of the operator stypes */ inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>& in_attrs, const std::vector<int>& out_attrs) { std::ostringstream os; os << "operator = " << attrs.op->name << "\ninput storage types = ["; for (const int attr : in_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "output storage types = ["; for (const int attr : out_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "params = {"; for (auto kv : attrs.dict) { os << "\"" << kv.first << "\" : " << kv.second << ", "; } os << "}\n" << "context.dev_mask = " << dev_type_string(dev_mask); return os.str(); } /*! 
\brief get string representation of the operator */ inline std::string operator_string(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { std::string result = ""; std::vector<int> in_stypes; std::vector<int> out_stypes; in_stypes.reserve(inputs.size()); out_stypes.reserve(outputs.size()); auto xform = [](const NDArray arr) -> int { return arr.storage_type(); }; std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform); std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform); result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes); return result; } /*! \brief log message once. Intended for storage fallback warning messages. */ inline void LogOnce(const std::string& message) { typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore; auto log_store = LogStore::Get(); if (log_store->find(message) == log_store->end()) { LOG(INFO) << message; log_store->insert(message); } } /*! \brief log storage fallback event */ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>* in_attrs, const std::vector<int>* out_attrs) { static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true); if (!log) return; const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs); std::ostringstream os; const char* warning = "\nThe operator with default storage type will be dispatched " "for execution. You're seeing this warning message because the operator above is unable " "to process the given ndarrays with specified storage types, context and parameter. " "Temporary dense ndarrays are generated in order to execute the operator. " "This does not affect the correctness of the programme. 
" "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to " "0 to suppress this warning."; os << "\nStorage type fallback detected:\n" << op_str << warning; LogOnce(os.str()); #if MXNET_USE_MKLDNN == 1 if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. " "You can re-enable by setting MXNET_MKLDNN_ENABLED=1"); if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set." "Should only be set if " "your model has variable input shapes, " "as cache size may grow unbounded"); #endif } // heuristic to dermine number of threads per GPU inline int GetNumThreadsPerGPU() { // This is resource efficient option. return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2); } // heuristic to get number of matching colors. // this decides how much parallelism we can get in each GPU. inline int GetExecNumMatchColor() { // This is resource efficient option. int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1); return std::min(num_match_color, GetNumThreadsPerGPU()); } template<typename T, typename V> V ParallelAccumulate(const T* a, const int n, V start) { V sum = start; #pragma omp parallel for reduction(+:sum) for (int i = 0; i < n; ++i) { sum += a[i]; } return sum; } /*! * \brief * Helper function for ParallelSort. * DO NOT call this function directly. * Use the interface ParallelSort instead. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt, typename Compare> void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) { if (len < grainsize) { std::sort(first, first+len, comp); } else { std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp); ParallelSortHelper(first+len/2, len - len/2, grainsize, comp); thr.join(); std::inplace_merge(first, first+len/2, first+len, comp); } } /*! * \brief * Sort the elements in the range [first, last) into the ascending order defined by * the comparator comp. 
* If the length of the range [first, last) is greater than a certain threshold, * the range will be recursively divided into two and assign two threads * to sort each half range. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt, typename Compare> void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) { const auto num = std::distance(first, last); size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16)); ParallelSortHelper(first, num, grainsize, comp); } /*! * \brief * Sort the elements in the range [first, last) into ascending order. * The elements are compared using the default < operator. * If the length of the range [first, last) is greater than a certain threshold, * the range will be recursively divided into two and assign two threads * to sort each half range. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt> void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) { ParallelSort(first, last, num_threads, std::less<typename std::iterator_traits<RandomIt>::value_type>()); } /*! * \brief Random Engine */ typedef std::mt19937 RANDOM_ENGINE; /*! * \brief Helper functions. */ namespace helper { /*! * \brief Helper for non-array type `T`. */ template <class T> struct UniqueIf { /*! * \brief Type of `T`. */ using SingleObject = std::unique_ptr<T>; }; /*! * \brief Helper for an array of unknown bound `T`. */ template <class T> struct UniqueIf<T[]> { /*! * \brief Type of `T`. */ using UnknownBound = std::unique_ptr<T[]>; }; /*! * \brief Helper for an array of known bound `T`. */ template <class T, size_t kSize> struct UniqueIf<T[kSize]> { /*! * \brief Type of `T`. */ using KnownBound = void; }; } // namespace helper /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param args List of arguments with which an instance of `T` will be * constructed. 
* \return `std``::``unique_ptr` of an instance of type `T`. * * Constructs a non-array type `T`. The arguments `args` are passed to the * constructor of `T`. The function does not participate in the overload * resolution if `T` is an array type. */ template <class T, class... Args> typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) { return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); } /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param n The size of the array to construct. * \return `std``::``unique_ptr` of an instance of type `T`. * * Constructs an array of unknown bound `T`. The function does not participate * in the overload resolution unless `T` is an array of unknown bound. */ template <class T> typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) { using U = typename std::remove_extent<T>::type; return std::unique_ptr<T>(new U[n]{}); } /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param args List of arguments with which an instance of `T` will be * constructed. * * Constructs an arrays of known bound is disallowed. */ template <class T, class... Args> typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete; template<typename FCompType> FCompType GetFCompute(const nnvm::Op* op, const std::string& name, const Context& ctx) { static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>"); static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>"); if (ctx.dev_mask() == cpu::kDevMask) { return fcompute_cpu.get(op, nullptr); } else if (ctx.dev_mask() == gpu::kDevMask) { return fcompute_gpu.get(op, nullptr); } else { LOG(FATAL) << "Unknown device mask " << ctx.dev_mask(); return nullptr; } } /*! * \brief Return the max integer value representable in the type `T` without loss of precision. */ template <typename T> constexpr size_t MaxIntegerValue() { return std::is_integral<T>::value ? 
std::numeric_limits<T>::max(): size_t(2) << (std::numeric_limits<T>::digits - 1); } template <> constexpr size_t MaxIntegerValue<mshadow::half::half_t>() { return size_t(2) << 10; } template <> constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() { return size_t(2) << 14; } MSHADOW_XINLINE int ilog2ul(size_t a) { int k = 1; while (a >>= 1) ++k; return k; } MSHADOW_XINLINE int ilog2ui(unsigned int a) { int k = 1; while (a >>= 1) ++k; return k; } /*! * \brief Return an NDArray of all zeros. */ inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape, const Context &ctx, const int dtype) { // NDArray with default storage if (stype == kDefaultStorage) { NDArray ret(shape, ctx, false, dtype); ret = 0; return ret; } // NDArray with non-default storage. Storage allocation is always delayed. return NDArray(stype, shape, ctx, true, dtype); } /*! * \brief Helper to add a NDArray of zeros to a std::vector. */ inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape, const Context &ctx, const int dtype, std::vector<NDArray> *vec) { // NDArray with default storage if (stype == kDefaultStorage) { vec->emplace_back(shape, ctx, false, dtype); vec->back() = 0; } else { // NDArray with non-default storage. Storage allocation is always delayed. vec->emplace_back(stype, shape, ctx, true, dtype); } } /*! * \brief parallelize copy by OpenMP. */ template<typename DType> inline void ParallelCopy(DType* dst, const DType* src, index_t size) { static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000); if (size >= copy_block_size) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t i = 0; i < size; ++i) { dst[i] = src[i]; } } else { #pragma GCC diagnostic push #if __GNUC__ >= 8 #pragma GCC diagnostic ignored "-Wclass-memaccess" #endif std::memcpy(dst, src, sizeof(DType) * size); #pragma GCC diagnostic pop } } /*! 
* \breif parallelize add by OpenMP */ template<typename DType> inline void ParallelAdd(DType* dst, const DType* src, index_t size) { static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000); if (size >= add_block_size) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t i = 0; i < size; ++i) { dst[i] += src[i]; } } else { for (index_t i = 0; i < size; ++i) { dst[i] += src[i]; } } } /*! * \brief If numpy compatibility is turned off (default), the shapes passed in * by users follow the legacy shape definition: * 1. 0 ndim means the shape is completely unknown. * 2. 0 dim size means the dim size is unknown. * We need to convert those shapes to use the numpy shape definition: * 1. 0 ndim means it's a scalar tensor. * 2. -1 ndim means the shape is unknown. * 3. 0 dim size means no elements in that dimension. * 4. -1 dim size means the dimension's size is unknown. * so that operator's infer shape function can work in backend. * \param shape to be converted. * Note: It is possible that the shape to be converted is already * numpy compatible. For example, when a subgraph operator's infer * shape function is called from the infer shape pass of the whole * graph, its input/output shapes have been converted to numpy * compatible shapes. */ inline void ConvertToNumpyShape(mxnet::TShape* shape) { if (shape->ndim() == 0) { // legacy shape ndim = 0 means unknown *shape = mxnet::TShape(); // unknown shape ndim = -1 } else { for (int j = 0; j < shape->ndim(); ++j) { if ((*shape)[j] == 0) { // legacy shape dim_size = 0 means unknown (*shape)[j] = -1; // unknown dim size = -1 } } } } inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) { for (size_t i = 0; i < shapes->size(); ++i) { ConvertToNumpyShape(&(shapes->at(i))); } } /*! * \brief This is function is used to convert shapes returned by * the infer shape functions/pass to the legacy shape definition. 
*/ inline void ConvertToLegacyShape(mxnet::TShape* shape) { if (!mxnet::ndim_is_known(*shape)) { *shape = mxnet::TShape(0, -1); } else { for (int j = 0; j < shape->ndim(); ++j) { if (!mxnet::dim_size_is_known(*shape, j)) { (*shape)[j] = 0; } } } } inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) { for (size_t i = 0; i < shapes->size(); ++i) { ConvertToLegacyShape(&(shapes->at(i))); } } void ExecuteMonInputCallback( const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays, size_t nid, const std::function<void(const char *, const char *, void *)> &monitor_callback); void ExecuteMonOutputCallback( const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays, size_t nid, const std::function<void(const char *, const char *, void *)> &monitor_callback); inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) { // convert negative axes to positive values const int ndim = src.ndim(); mxnet::TShape axes = src; for (int i = 0; i < ndim; ++i) { if (axes[i] < 0) { axes[i] += ndim; } CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]=" << axes[i] << " exceeds the range [" << 0 << ", " << ndim << ")"; } return axes; } inline bool is_float(const int dtype) { return dtype == mshadow::kFloat32 || dtype == mshadow::kFloat64 || dtype == mshadow::kFloat16; } inline bool is_int(const int dtype) { return dtype == mshadow::kUint8 || dtype == mshadow::kInt8 || dtype == mshadow::kInt32 || dtype == mshadow::kInt64; } inline int get_more_precise_type(const int type1, const int type2) { if (type1 == type2) return type1; if (is_float(type1) && is_float(type2)) { if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) { return mshadow::kFloat64; } if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) { return mshadow::kFloat32; } return mshadow::kFloat16; } else if (is_float(type1) || is_float(type2)) { return is_float(type1) ? 
type1 : type2; } if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) { return mshadow::kInt64; } if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) { return mshadow::kInt32; } CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) || (type1 == mshadow::kInt8 && type2 == mshadow::kUint8))) << "1 is UInt8 and 1 is Int8 should not get here"; if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) { return mshadow::kUint8; } return mshadow::kInt8; } inline int np_binary_out_infer_type(const int type1, const int type2) { if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) || (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) { return mshadow::kInt32; } return get_more_precise_type(type1, type2); } inline const std::string NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) { // obtain the profiler scope name, if assigned previously std::string profiler_scope = MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR; const std::unordered_map<std::string, std::string>& node_attrs_dict = attrs.dict; const std::unordered_map<std::string, std::string>::const_iterator profiler_scope_iter = node_attrs_dict.find("__profiler_scope__"); if (profiler_scope_iter != node_attrs_dict.end()) { profiler_scope = profiler_scope_iter->second; } return profiler_scope; } inline int GetDefaultDtype() { return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32; } inline int GetDefaultDtype(int dtype) { if (dtype != -1) return dtype; return Imperative::Get()->is_np_default_dtype() ? 
mshadow::kFloat64 : mshadow::kFloat32; } struct MShadowTypeInfo { std::string name; int size; int acc_size; MShadowTypeInfo(const std::string name, const int size, const int acc_size) : name(std::move(name)), size(size), acc_size(acc_size) {} MShadowTypeInfo(const std::string name, const int size) : MShadowTypeInfo(name, size, size) {} }; MShadowTypeInfo mshadow_type_info(const int type_flag); inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) { #if _MSC_VER *ptr = _aligned_malloc(size, alignment); if (*ptr == nullptr) return false; #else int res = posix_memalign(ptr, alignment, size); if (res != 0) return false; #endif return true; } inline void AlignedMemFree(void* ptr) { #if _MSC_VER _aligned_free(ptr); #else free(ptr); #endif } } // namespace common } // namespace mxnet #endif // MXNET_COMMON_UTILS_H_
/* ===== pomp.h ===== */
// license:GPL-2.0+ // copyright-holders:Couriersud /* * pomp.h * * Wrap all OPENMP stuff here in a hopefully c++ compliant way. */ #ifndef POMP_H_ #define POMP_H_ #include "pconfig.h" #if HAS_OPENMP #include "omp.h" #endif namespace plib { namespace omp { template <class T> void for_static(const int start, const int end, const T &what) { #if HAS_OPENMP && USE_OPENMP #pragma omp parallel #endif { #if HAS_OPENMP && USE_OPENMP #pragma omp for schedule(static) #endif for (int i = start; i < end; i++) what(i); } } inline void set_num_threads(const int threads) { #if HAS_OPENMP && USE_OPENMP omp_set_num_threads(threads); #endif } inline int get_max_threads() { #if HAS_OPENMP && USE_OPENMP return omp_get_max_threads(); #else return 1; #endif } // ---------------------------------------------------------------------------------------- // pdynlib: dynamic loading of libraries ... // ---------------------------------------------------------------------------------------- } } #endif /* PSTRING_H_ */
/* ===== normal.c ===== */
// RUN: %libomp-compile-and-run | FileCheck %s // RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s // REQUIRES: ompt #include "callback.h" int main() { #pragma omp parallel num_threads(4) { print_ids(0); print_ids(1); } print_fuzzy_address(1); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: parallel_data initially not null // CHECK-NOT: 0: task_data initially not null // CHECK-NOT: 0: thread_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]] // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end! 
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // THREADS: 0: NULL_POINTER=[[NULL:.*$]] // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_initial=1, thread_id=[[MASTER_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker={{.*}} // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], 
task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: 
{{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] return 0; }
/* ===== ccl_correlation.c ===== */
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <gsl/gsl_integration.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_roots.h> #include <gsl/gsl_spline.h> #include <gsl/gsl_sf_bessel.h> #include <gsl/gsl_sf_legendre.h> #include "ccl.h" /*--------ROUTINE: taper_cl ------ TASK:n Apply cosine tapering to Cls to reduce aliasing INPUT: number of ell bins for Cl, ell vector, C_ell vector, limits for tapering e.g., ell_limits=[low_ell_limit_lower,low_ell_limit_upper,high_ell_limit_lower,high_ell_limit_upper] */ static int taper_cl(int n_ell,double *ell,double *cl, double *ell_limits) { for(int i=0;i<n_ell;i++) { if(ell[i]<ell_limits[0] || ell[i]>ell_limits[3]) { cl[i]=0;//ell outside desirable range continue; } if(ell[i]>=ell_limits[1] && ell[i]<=ell_limits[2]) continue;//ell within good ell range if(ell[i]<ell_limits[1])//tapering low ell cl[i]*=cos((ell[i]-ell_limits[1])/(ell_limits[1]-ell_limits[0])*M_PI/2.); if(ell[i]>ell_limits[2])//tapering high ell cl[i]*=cos((ell[i]-ell_limits[2])/(ell_limits[3]-ell_limits[2])*M_PI/2.); } return 0; } /*--------ROUTINE: ccl_tracer_corr_fftlog ------ TASK: For a given tracer, get the correlation function Following function takes a function to calculate angular cl as well. 
By default above function will call it using ccl_angular_cl INPUT: type of tracer, number of theta values to evaluate = NL, theta vector */ static void ccl_tracer_corr_fftlog(ccl_cosmology *cosmo, int n_ell,double *ell,double *cls, int n_theta,double *theta,double *wtheta, int corr_type,int do_taper_cl,double *taper_cl_limits, int *status) { int i; double *l_arr,*cl_arr,*th_arr,*wth_arr; l_arr=ccl_log_spacing(cosmo->spline_params.ELL_MIN_CORR,cosmo->spline_params.ELL_MAX_CORR,cosmo->spline_params.N_ELL_CORR); if(l_arr==NULL) { *status=CCL_ERROR_LINSPACE; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog(): ran out of memory\n"); return; } cl_arr=malloc(cosmo->spline_params.N_ELL_CORR*sizeof(double)); if(cl_arr==NULL) { free(l_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog(): ran out of memory\n"); return; } //Interpolate input Cl into array needed for FFTLog ccl_f1d_t *cl_spl=ccl_f1d_t_new(n_ell,ell,cls,cls[0],0, ccl_f1d_extrap_const, ccl_f1d_extrap_logx_logy, status); if (*status) { free(l_arr); free(cl_arr); ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog(): " "failed to create spline\n"); if (cl_spl) ccl_f1d_t_free(cl_spl); return; } if(cl_spl==NULL) { free(l_arr); free(cl_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog(): " "ran out of memory\n"); return; } for(i=0;i<cosmo->spline_params.N_ELL_CORR;i++) cl_arr[i]=ccl_f1d_t_eval(cl_spl,l_arr[i]); ccl_f1d_t_free(cl_spl); if (do_taper_cl) taper_cl(cosmo->spline_params.N_ELL_CORR,l_arr,cl_arr,taper_cl_limits); th_arr=malloc(sizeof(double)*cosmo->spline_params.N_ELL_CORR); if(th_arr==NULL) { free(l_arr); free(cl_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog(): " "ran out of memory\n"); return; } wth_arr=(double 
*)malloc(sizeof(double)*cosmo->spline_params.N_ELL_CORR); if(wth_arr==NULL) { free(l_arr); free(cl_arr); free(th_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog(): " "ran out of memory\n"); return; } for(i=0;i<cosmo->spline_params.N_ELL_CORR;i++) th_arr[i]=0; //Although set here to 0, theta is modified by FFTlog to obtain the correlation at ~1/l int i_bessel=0; if(corr_type==CCL_CORR_GG) i_bessel=0; if(corr_type==CCL_CORR_GL) i_bessel=2; if(corr_type==CCL_CORR_LP) i_bessel=0; if(corr_type==CCL_CORR_LM) i_bessel=4; ccl_fftlog_ComputeXi2D(i_bessel,0, 1, cosmo->spline_params.N_ELL_CORR,l_arr,&cl_arr, th_arr,&wth_arr, status); // Interpolate to output values of theta ccl_f1d_t *wth_spl=ccl_f1d_t_new(cosmo->spline_params.N_ELL_CORR,th_arr, wth_arr,wth_arr[0],0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (wth_spl == NULL) { free(l_arr); free(cl_arr); free(th_arr); free(wth_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog(): " "ran out of memory\n"); return; } for(i=0;i<n_theta;i++) wtheta[i]=ccl_f1d_t_eval(wth_spl,theta[i]*M_PI/180.); ccl_f1d_t_free(wth_spl); free(l_arr); free(cl_arr); free(th_arr); free(wth_arr); return; } typedef struct { ccl_f1d_t *cl_spl; int i_bessel; double th; } corr_int_par; static double corr_bessel_integrand(double l,void *params) { double cl,jbes; corr_int_par *p=(corr_int_par *)params; double x=l*p->th; cl=ccl_f1d_t_eval(p->cl_spl,l); jbes=gsl_sf_bessel_Jn(p->i_bessel,x); return l*jbes*cl; } static void ccl_tracer_corr_bessel(ccl_cosmology *cosmo, int n_ell,double *ell,double *cls, int n_theta,double *theta,double *wtheta, int corr_type,int *status) { corr_int_par cp; ccl_f1d_t *cl_spl = NULL; cl_spl = ccl_f1d_t_new(n_ell, ell, cls, cls[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_logx_logy, status); if(cl_spl == NULL) { *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message( cosmo, 
"ccl_correlation.c: ccl_tracer_corr_bessel(): " "ran out of memory\n"); return; } int ith, gslstatus; double result,eresult; gsl_function F; gsl_integration_workspace *w = NULL; int local_status; #pragma omp parallel default(none) \ shared(cosmo, status, wtheta, n_ell, ell, cls, \ corr_type, cl_spl, theta, n_theta) \ private(w, F, result, eresult, local_status, ith, \ gslstatus, cp) { local_status = *status; switch(corr_type) { case CCL_CORR_GG: cp.i_bessel = 0; break; case CCL_CORR_GL: cp.i_bessel = 2; break; case CCL_CORR_LP: cp.i_bessel = 0; break; case CCL_CORR_LM: cp.i_bessel = 4; break; } cp.cl_spl = cl_spl; w = gsl_integration_workspace_alloc(cosmo->gsl_params.N_ITERATION); if (w == NULL) { local_status = CCL_ERROR_MEMORY; } F.function = &corr_bessel_integrand; F.params = &cp; #pragma omp for schedule(dynamic) for(ith=0; ith < n_theta; ith++) { if (local_status == 0) { cp.th = theta[ith]*M_PI/180; //TODO: Split into intervals between first bessel zeros before integrating //This will help both speed and accuracy of the integral. 
gslstatus = gsl_integration_qag(&F, 0, cosmo->spline_params.ELL_MAX_CORR, 0, cosmo->gsl_params.INTEGRATION_EPSREL, cosmo->gsl_params.N_ITERATION, cosmo->gsl_params.INTEGRATION_GAUSS_KRONROD_POINTS, w, &result, &eresult); if(gslstatus != GSL_SUCCESS) { ccl_raise_gsl_warning(gslstatus, "ccl_correlation.c: ccl_tracer_corr_bessel():"); local_status |= gslstatus; } wtheta[ith] = result/(2*M_PI); } } if (local_status) { #pragma omp atomic write *status = local_status; } gsl_integration_workspace_free(w); } ccl_f1d_t_free(cl_spl); } /*--------ROUTINE: ccl_compute_legendre_polynomial ------ TASK: Compute input factor for ccl_tracer_corr_legendre INPUT: tracer 1, tracer 2, i_bessel, theta array, n_theta, L_max, output Pl_theta */ static void ccl_compute_legendre_polynomial(int corr_type,double theta,int ell_max,double *Pl_theta) { int j; double cth=cos(theta*M_PI/180); //Initialize Pl_theta for (j=0;j<=ell_max;j++) Pl_theta[j]=0.; if(corr_type==CCL_CORR_GG) { gsl_sf_legendre_Pl_array(ell_max,cth,Pl_theta); for (j=0;j<=ell_max;j++) Pl_theta[j]*=(2*j+1); } else if(corr_type==CCL_CORR_GL) { for (j=2;j<=ell_max;j++) {//https://arxiv.org/pdf/1007.4809.pdf Pl_theta[j]=gsl_sf_legendre_Plm(j,2,cth); Pl_theta[j]*=(2*j+1.)/((j+0.)*(j+1.)); } } } /*--------ROUTINE: ccl_tracer_corr_legendre ------ TASK: Compute correlation function via Legendre polynomials INPUT: cosmology, number of theta bins, theta array, tracer 1, tracer 2, i_bessel, boolean for tapering, vector of tapering limits, correlation vector, angular_cl function. 
*/ static void ccl_tracer_corr_legendre(ccl_cosmology *cosmo, int n_ell,double *ell,double *cls, int n_theta,double *theta,double *wtheta, int corr_type,int do_taper_cl,double *taper_cl_limits, int *status) { int i; double *l_arr = NULL, *cl_arr = NULL, *Pl_theta = NULL; ccl_f1d_t *cl_spl; if(corr_type==CCL_CORR_LM || corr_type==CCL_CORR_LP){ *status=CCL_ERROR_NOT_IMPLEMENTED; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_legendre(): " "CCL does not support full-sky xi+- calcuations.\nhttps://arxiv.org/abs/1702.05301 indicates flat-sky to be sufficient.\n"); } if(*status==0) { l_arr=malloc(((int)(cosmo->spline_params.ELL_MAX_CORR)+1)*sizeof(double)); if(l_arr==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_legendre(): " "ran out of memory\n"); } } if(*status==0) { cl_arr=malloc(((int)(cosmo->spline_params.ELL_MAX_CORR)+1)*sizeof(double)); if(cl_arr==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_legendre(): " "ran out of memory\n"); } } if(*status==0) { //Interpolate input Cl into cl_spl=ccl_f1d_t_new(n_ell,ell,cls,cls[0],0, ccl_f1d_extrap_const, ccl_f1d_extrap_logx_logy, status); if(cl_spl==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_legendre(): " "ran out of memory\n"); } } if(*status==0) { for(i=0;i<=(int)(cosmo->spline_params.ELL_MAX_CORR);i++) { double l=(double)i; l_arr[i]=l; cl_arr[i]=ccl_f1d_t_eval(cl_spl,l); } ccl_f1d_t_free(cl_spl); if (do_taper_cl) *status=taper_cl((int)(cosmo->spline_params.ELL_MAX_CORR)+1,l_arr,cl_arr,taper_cl_limits); } int local_status, i_L; #pragma omp parallel default(none) \ shared(cosmo, theta, cl_arr, wtheta, n_theta, status, corr_type) \ private(Pl_theta, i, i_L, local_status) { Pl_theta = NULL; local_status = *status; if (local_status == 0) { Pl_theta = 
malloc(sizeof(double)*((int)(cosmo->spline_params.ELL_MAX_CORR)+1)); if (Pl_theta == NULL) { local_status = CCL_ERROR_MEMORY; } } #pragma omp for schedule(dynamic) for (int i=0; i < n_theta; i++) { if (local_status == 0) { wtheta[i] = 0; ccl_compute_legendre_polynomial(corr_type, theta[i], (int)(cosmo->spline_params.ELL_MAX_CORR), Pl_theta); for (i_L=1; i_L < (int)(cosmo->spline_params.ELL_MAX_CORR); i_L+=1) wtheta[i] += cl_arr[i_L]*Pl_theta[i_L]; wtheta[i] /= (M_PI*4); } } if (local_status) { #pragma omp atomic write *status = local_status; } free(Pl_theta); } free(l_arr); free(cl_arr); } /*--------ROUTINE: ccl_tracer_corr ------ TASK: For a given tracer, get the correlation function. Do so by running ccl_angular_cls. If you already have Cls calculated, go to the next function to pass them directly. INPUT: cosmology, number of theta values to evaluate = NL, theta vector, tracer 1, tracer 2, i_bessel, key for tapering, limits of tapering correlation function. */ void ccl_correlation(ccl_cosmology *cosmo, int n_ell,double *ell,double *cls, int n_theta,double *theta,double *wtheta, int corr_type,int do_taper_cl,double *taper_cl_limits,int flag_method, int *status) { switch(flag_method) { case CCL_CORR_FFTLOG : ccl_tracer_corr_fftlog(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type, do_taper_cl,taper_cl_limits,status); break; case CCL_CORR_LGNDRE : ccl_tracer_corr_legendre(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type, do_taper_cl,taper_cl_limits,status); break; case CCL_CORR_BESSEL : ccl_tracer_corr_bessel(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type,status); break; default : *status=CCL_ERROR_INCONSISTENT; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation(): Unknown algorithm\n"); } } /*--------ROUTINE: ccl_correlation_3d ------ TASK: Calculate the 3d-correlation function. Do so by using FFTLog. 
INPUT: cosmology, scale factor a, number of r values, r values, key for tapering, limits of tapering Correlation function result will be in array xi */ void ccl_correlation_3d(ccl_cosmology *cosmo, ccl_f2d_t *psp, double a, int n_r,double *r,double *xi, int do_taper_pk,double *taper_pk_limits, int *status) { int i,N_ARR; double *k_arr,*pk_arr,*r_arr,*xi_arr; //number of data points for k and pk array N_ARR=(int)(cosmo->spline_params.N_K_3DCOR*log10(cosmo->spline_params.K_MAX/cosmo->spline_params.K_MIN)); k_arr=ccl_log_spacing(cosmo->spline_params.K_MIN,cosmo->spline_params.K_MAX,N_ARR); if(k_arr==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d(): ran out of memory\n"); return; } pk_arr=malloc(N_ARR*sizeof(double)); if(pk_arr==NULL) { free(k_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d(): ran out of memory\n"); return; } for (i=0; i<N_ARR; i++){ pk_arr[i] = ccl_f2d_t_eval(psp, log(k_arr[i]), a, cosmo, status); } if (do_taper_pk) taper_cl(N_ARR,k_arr,pk_arr,taper_pk_limits); r_arr=malloc(sizeof(double)*N_ARR); if(r_arr==NULL) { free(k_arr); free(pk_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d(): ran out of memory\n"); return; } xi_arr=malloc(sizeof(double)*N_ARR); if(xi_arr==NULL) { free(k_arr); free(pk_arr); free(r_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d(): ran out of memory\n"); return; } for(i=0;i<N_ARR;i++) r_arr[i]=0; ccl_fftlog_ComputeXi3D(0, 0, 1, N_ARR, k_arr, &pk_arr, r_arr, &xi_arr, status); // Interpolate to output values of r ccl_f1d_t *xi_spl=ccl_f1d_t_new(N_ARR,r_arr,xi_arr,xi_arr[0],0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (xi_spl == NULL) { free(k_arr); free(pk_arr); free(r_arr); free(xi_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, 
"ccl_correlation.c: ccl_correlation_3d(): ran out of memory\n"); return; } for(i=0;i<n_r;i++) xi[i]=ccl_f1d_t_eval(xi_spl,r[i]); ccl_f1d_t_free(xi_spl); free(k_arr); free(pk_arr); free(r_arr); free(xi_arr); return; } /*--------ROUTINE: ccl_correlation_multipole ------ TASK: Calculate multipole of the redshift space correlation function. Do so using FFTLog. INPUT: cosmology, scale factor a, beta (= growth rate / bias), multipole order l = 0, 2, or 4, number of s values, s values Multipole function result will be in array xi */ void ccl_correlation_multipole(ccl_cosmology *cosmo, ccl_f2d_t *psp, double a, double beta, int l, int n_s, double *s, double *xi, int *status) { int i, N_ARR; double *k_arr, *pk_arr, *s_arr, *xi_arr, *xi_arr0; N_ARR = (int)(cosmo->spline_params.N_K_3DCOR * log10(cosmo->spline_params.K_MAX / cosmo->spline_params.K_MIN)); k_arr = ccl_log_spacing(cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX, N_ARR); if (k_arr == NULL) { *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole(): ran out of memory\n"); return; } pk_arr = malloc(N_ARR * sizeof(double)); if (pk_arr == NULL) { free(k_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole(): ran out of memory\n"); return; } for (i = 0; i < N_ARR; i++) pk_arr[i] = ccl_f2d_t_eval(psp, log(k_arr[i]), a, cosmo, status); s_arr = malloc(sizeof(double) * N_ARR); if (s_arr == NULL) { free(k_arr); free(pk_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole(): ran out of memory\n"); return; } xi_arr = malloc(sizeof(double) * N_ARR); if (xi_arr == NULL) { free(k_arr); free(pk_arr); free(s_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole(): ran out of memory\n"); return; } xi_arr0 = malloc(sizeof(double) * N_ARR); if (xi_arr0 == NULL) { 
free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole(): ran out of memory\n"); return; } for (i = 0; i < N_ARR; i++) s_arr[i] = 0; // Calculate multipoles if (l == 0) { ccl_fftlog_ComputeXi3D(0, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr0, status); for (i = 0; i < N_ARR; i++) xi_arr[i] = (1. + 2. / 3 * beta + 1. / 5 * beta * beta) * xi_arr0[i]; } else if (l == 2) { ccl_fftlog_ComputeXi3D(2, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr0, status); for (i = 0; i < N_ARR; i++) xi_arr[i] = -(4. / 3 * beta + 4. / 7 * beta * beta) * xi_arr0[i]; } else if (l == 4) { ccl_fftlog_ComputeXi3D(4, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr0, status); for (i = 0; i < N_ARR; i++) xi_arr[i] = 8. / 35 * beta * beta * xi_arr0[i]; } else { ccl_cosmology_set_status_message(cosmo, "unavailable value of l\n"); return; } // Interpolate to output values of s ccl_f1d_t *xi_spl = ccl_f1d_t_new(N_ARR, s_arr, xi_arr, xi_arr[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (xi_spl == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole(): ran out of memory\n"); } for (i = 0; i < n_s; i++) xi[i] = ccl_f1d_t_eval(xi_spl,s[i]); ccl_f1d_t_free(xi_spl); free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); return; } /*--------ROUTINE: ccl_correlation_multipole_spline ------ TASK: Store multipoles of the redshift-space correlation in global splines INPUT: cosmology, scale factor a Result is stored in cosmo->data.rsd_splines[] */ void ccl_correlation_multipole_spline(ccl_cosmology *cosmo, ccl_f2d_t *psp, double a, int *status) { int i, N_ARR; double *k_arr, *pk_arr, *s_arr, *xi_arr, *xi_arr0, *xi_arr2, *xi_arr4; N_ARR = (int)(cosmo->spline_params.N_K_3DCOR * log10(cosmo->spline_params.K_MAX / cosmo->spline_params.K_MIN)); 
k_arr = ccl_log_spacing(cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX, N_ARR); if (k_arr == NULL) { *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } pk_arr = malloc(N_ARR * sizeof(double)); if (pk_arr == NULL) { free(k_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } for (i = 0; i < N_ARR; i++) pk_arr[i] = ccl_f2d_t_eval(psp, log(k_arr[i]), a, cosmo, status); s_arr = malloc(sizeof(double) * N_ARR); if (s_arr == NULL) { free(k_arr); free(pk_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } xi_arr = malloc(sizeof(double) * N_ARR); if (xi_arr == NULL) { free(k_arr); free(pk_arr); free(s_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } xi_arr0 = malloc(sizeof(double) * N_ARR); if (xi_arr0 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } xi_arr2 = malloc(sizeof(double) * N_ARR); if (xi_arr2 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } xi_arr4 = malloc(sizeof(double) * N_ARR); if (xi_arr4 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } for (i 
= 0; i < N_ARR; i++) s_arr[i] = 0; // Calculate multipoles ccl_fftlog_ComputeXi3D(0, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr0, status); ccl_fftlog_ComputeXi3D(2, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr2, status); ccl_fftlog_ComputeXi3D(4, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr4, status); // free any memory that may have been allocated ccl_f1d_t_free(cosmo->data.rsd_splines[0]); ccl_f1d_t_free(cosmo->data.rsd_splines[1]); ccl_f1d_t_free(cosmo->data.rsd_splines[2]); cosmo->data.rsd_splines[0] = NULL; cosmo->data.rsd_splines[1] = NULL; cosmo->data.rsd_splines[1] = NULL; // Interpolate to output values of s cosmo->data.rsd_splines[0] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr0, xi_arr0[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (cosmo->data.rsd_splines[0] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } cosmo->data.rsd_splines[1] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr2, xi_arr2[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (cosmo->data.rsd_splines[1] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); ccl_f1d_t_free(cosmo->data.rsd_splines[0]); cosmo->data.rsd_splines[0] = NULL; *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } cosmo->data.rsd_splines[2] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr4, xi_arr4[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (cosmo->data.rsd_splines[2] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); ccl_f1d_t_free(cosmo->data.rsd_splines[0]); cosmo->data.rsd_splines[0] = NULL; ccl_f1d_t_free(cosmo->data.rsd_splines[1]); cosmo->data.rsd_splines[1] = 
NULL; *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): " "ran out of memory\n"); return; } // set the scale factor cosmo->data.rsd_splines_scalefactor = a; free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); return; } /*--------ROUTINE: ccl_correlation_3dRsd ------ TASK: Calculate the redshift-space correlation function. INPUT: cosmology, scale factor a, number of s values, s values, mu = cosine of galaxy separation angle w.r.t. line of sight, beta (= growth rate / bias), key for using spline Correlation function result will be in array xi */ void ccl_correlation_3dRsd(ccl_cosmology *cosmo, ccl_f2d_t *psp, double a, int n_s, double *s, double mu, double beta, double *xi, int use_spline, int *status) { int i; double *xi_arr0, *xi_arr2, *xi_arr4; if (use_spline == 0) { xi_arr0 = malloc(sizeof(double) * n_s); if (xi_arr0 == NULL) { *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3dRsd(): ran out of memory\n"); return; } xi_arr2 = malloc(sizeof(double) * n_s); if (xi_arr2 == NULL) { free(xi_arr0); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3dRsd(): ran out of memory\n"); return; } xi_arr4 = malloc(sizeof(double) * n_s); if (xi_arr4 == NULL) { free(xi_arr0); free(xi_arr2); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3dRsd(): ran out of memory\n"); return; } ccl_correlation_multipole(cosmo, psp, a, beta, 0, n_s, s, xi_arr0, status); ccl_correlation_multipole(cosmo, psp, a, beta, 2, n_s, s, xi_arr2, status); ccl_correlation_multipole(cosmo, psp, a, beta, 4, n_s, s, xi_arr4, status); for (i = 0; i < n_s; i++) xi[i] = xi_arr0[i] + xi_arr2[i] * gsl_sf_legendre_Pl(2, mu) + xi_arr4[i] * gsl_sf_legendre_Pl(4, mu); free(xi_arr0); free(xi_arr2); free(xi_arr4); } else { if 
((cosmo->data.rsd_splines[0] == NULL) || (cosmo->data.rsd_splines[1] == NULL) || (cosmo->data.rsd_splines[2] == NULL) || (cosmo->data.rsd_splines_scalefactor != a)) ccl_correlation_multipole_spline(cosmo, psp, a, status); for (i = 0; i < n_s; i++) xi[i] = (1. + 2. / 3 * beta + 1. / 5 * beta * beta) * ccl_f1d_t_eval(cosmo->data.rsd_splines[0],s[i]) - (4. / 3 * beta + 4. / 7 * beta * beta) * ccl_f1d_t_eval(cosmo->data.rsd_splines[1],s[i]) * gsl_sf_legendre_Pl(2, mu) + 8. / 35 * beta * beta * ccl_f1d_t_eval(cosmo->data.rsd_splines[2],s[i]) * gsl_sf_legendre_Pl(4, mu); } return; } /*--------ROUTINE: ccl_correlation_3dRsd_avgmu ------ TASK: Calculate the average of redshift-space correlation function xi(s,mu) over mu at constant s INPUT: cosmology, scale factor a, number of s values, s values, beta (= growth rate / bias) The result will be in array xi */ void ccl_correlation_3dRsd_avgmu(ccl_cosmology *cosmo, ccl_f2d_t *psp, double a, int n_s, double *s, double beta, double *xi, int *status) { // The average is just the l=0 multipole - the higher multiples inetegrate to zero. ccl_correlation_multipole(cosmo, psp, a, beta, 0, n_s, s, xi, status); return; } /*--------ROUTINE: ccl_correlation_pi_sigma ------ TASK: Calculate the redshift-space correlation function using longitudinal and transverse coordinates pi and sigma. 
INPUT: cosmology, scale factor a, beta (= growth rate / bias), pi,
       number of sigma values, sigma values, key for using spline
Correlation function result will be in array xi */
void ccl_correlation_pi_sigma(ccl_cosmology *cosmo, ccl_f2d_t *psp,
                              double a, double beta, double pi, int n_sig,
                              double *sig, double *xi, int use_spline,
                              int *status) {
  int i;
  /* per-point line-of-sight cosine, separation, and scratch output */
  double *mu_arr, *s_arr, *xi_arr;

  mu_arr = malloc(sizeof(double) * n_sig);
  if (mu_arr == NULL) {
    *status = CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
      "ccl_correlation.c: ccl_correlation_pi_sigma(): ran out of memory\n");
    return;
  }

  s_arr = malloc(sizeof(double) * n_sig);
  if (s_arr == NULL) {
    free(mu_arr);
    *status = CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
      "ccl_correlation.c: ccl_correlation_pi_sigma(): ran out of memory\n");
    return;
  }

  xi_arr = malloc(sizeof(double) * n_sig);
  if (xi_arr == NULL) {
    free(mu_arr);
    free(s_arr);
    *status = CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
      "ccl_correlation.c: ccl_correlation_pi_sigma(): ran out of memory\n");
    return;
  }

  /* Convert (pi, sigma) coordinates to (s, mu):
   * s = sqrt(pi^2 + sigma^2), mu = pi / s (cosine w.r.t. line of sight). */
  for (i = 0; i < n_sig; i++) {
    s_arr[i] = sqrt(pi * pi + sig[i] * sig[i]);
    mu_arr[i] = pi / s_arr[i];
  }

  /* NOTE(review): each iteration evaluates the RSD correlation at ALL
   * n_sig separations (because mu differs per point) but keeps only
   * element i, so this loop is O(n_sig^2) in evaluations — and with
   * use_spline == 0 each call also recomputes the multipoles. Looks
   * like a candidate for a single-point call per i; verify intent. */
  for (i = 0; i < n_sig; i++) {
    ccl_correlation_3dRsd(cosmo, psp, a, n_sig, s_arr, mu_arr[i], beta,
                          xi_arr, use_spline, status);
    xi[i] = xi_arr[i];
  }

  free(mu_arr);
  free(xi_arr);
  free(s_arr);
  return;
}
kernel_cpu.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } // #ifdef __cplusplus // extern "C" { // #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 #include <omp.h> // (in path known to compiler) needed by openmp #include <stdlib.h> // (in path known to compiler) needed by malloc #include <stdio.h> // (in path known to compiler) needed by printf #include <math.h> // (in path known to compiler) needed by exp //======================================================================================================================================================150 // MAIN FUNCTION HEADER 
//======================================================================================================================================================150 #include "main.h" // (in the main program folder) needed to recognized input variables //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "timer.h" // (in library path specified to compiler) needed by timer //======================================================================================================================================================150 // KERNEL_CPU FUNCTION HEADER //======================================================================================================================================================150 #include "kernel_cpu.h" // (in the current directory) //========================================================================================================================================================================================================200 // PLASMAKERNEL_GPU //========================================================================================================================================================================================================200 void kernel_cpu( par_str par, dim_str dim, box_str* box, FOUR_VECTOR* rv, fp* qv, FOUR_VECTOR* fv) { //======================================================================================================================================================150 // Variables //======================================================================================================================================================150 // timer long long time0; time0 = get_time(); // timer long long time1; long long time2; long long 
time3; long long time4; // parameters fp alpha; fp a2; // counters int i, j, k, l; // home box long first_i; FOUR_VECTOR* rA; FOUR_VECTOR* fA; // neighbor box int pointer; long first_j; FOUR_VECTOR* rB; fp* qB; // common fp r2; fp u2; fp fs; fp vij; fp fxij,fyij,fzij; THREE_VECTOR d; time1 = get_time(); //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 time2 = get_time(); //======================================================================================================================================================150 // INPUTS //======================================================================================================================================================150 alpha = par.alpha; a2 = 2.0*alpha*alpha; time3 = get_time(); const unsigned long long full_program_start = current_time_ns(); { //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 { const unsigned long long parallel_for_start = current_time_ns(); #pragma omp parallel for private(i, j, k) private(first_i, rA, fA) private(pointer, first_j, rB, qB) private(r2, u2, fs, vij, fxij, fyij, fzij, d) for(l=0; l<dim.number_boxes; l=l+1){ //------------------------------------------------------------------------------------------100 // home box - box parameters //------------------------------------------------------------------------------------------100 first_i = box[l].offset; // offset to common arrays 
//------------------------------------------------------------------------------------------100 // home box - distance, force, charge and type parameters from common arrays //------------------------------------------------------------------------------------------100 rA = &rv[first_i]; fA = &fv[first_i]; //------------------------------------------------------------------------------------------100 // Do for the # of (home+neighbor) boxes //------------------------------------------------------------------------------------------100 for (k=0; k<(1+box[l].nn); k++) { //----------------------------------------50 // neighbor box - get pointer to the right box //----------------------------------------50 if(k==0){ pointer = l; // set first box to be processed to home box } else{ pointer = box[l].nei[k-1].number; // remaining boxes are neighbor boxes } //----------------------------------------50 // neighbor box - box parameters //----------------------------------------50 first_j = box[pointer].offset; //----------------------------------------50 // neighbor box - distance, force, charge and type parameters //----------------------------------------50 rB = &rv[first_j]; qB = &qv[first_j]; //----------------------------------------50 // Do for the # of particles in home box //----------------------------------------50 for (i=0; i<NUMBER_PAR_PER_BOX; i=i+1){ // do for the # of particles in current (home or neighbor) box for (j=0; j<NUMBER_PAR_PER_BOX; j=j+1){ // // coefficients r2 = rA[i].v + rB[j].v - DOT(rA[i],rB[j]); u2 = a2*r2; vij= exp(-u2); fs = 2.*vij; d.x = rA[i].x - rB[j].x; d.y = rA[i].y - rB[j].y; d.z = rA[i].z - rB[j].z; fxij=fs*d.x; fyij=fs*d.y; fzij=fs*d.z; // forces fA[i].v += qB[j]*vij; fA[i].x += qB[j]*fxij; fA[i].y += qB[j]*fyij; fA[i].z += qB[j]*fzij; } // for j } // for i } // for k } ; const unsigned long long parallel_for_end = current_time_ns(); printf("pragma117_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); } // for l } ; const 
unsigned long long full_program_end = current_time_ns(); printf("full_program %llu ns\n", full_program_end - full_program_start); time4 = get_time(); //======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Time spent in different stages of CPU/MCPU KERNEL:\n"); printf("%15.12f s, %15.12f % : CPU/MCPU: VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time4-time0) * 100); printf("%15.12f s, %15.12f % : MCPU: SET DEVICE\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time4-time0) * 100); printf("%15.12f s, %15.12f % : CPU/MCPU: INPUTS\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time4-time0) * 100); printf("%15.12f s, %15.12f % : CPU/MCPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time4-time0) * 100); printf("Total time:\n"); printf("%.12f s\n", (float) (time4-time0) / 1000000); } // main // #ifdef __cplusplus // } // #endif
GB_unop__identity_fp32_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp32_fc64) // op(A') function: GB (_unop_tran__identity_fp32_fc64) // C type: float // A type: GxB_FC64_t // cast: float cij = (float) creal (aij) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = (float) creal (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (float) creal (aij) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp32_fc64) ( float *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma 
omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; float z = (float) creal (aij) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; float z = (float) creal (aij) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp32_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__identity_uint16_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_fp32) // op(A') function: GB (_unop_tran__identity_uint16_fp32) // C type: uint16_t // A type: float // cast: uint16_t cij = GB_cast_to_uint16_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_fp32) ( uint16_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
parallel_push_pop_stack.c
#include <stdio.h>
#include <omp.h>

/* Fixed-capacity integer stack shared between two OpenMP threads:
 * thread 0 pushes values read from stdin, thread 1 pops and prints them.
 * All stack accesses are serialized through a single critical section. */

int MAXSIZE = 8;   /* capacity of the stack (must match the array size below) */
int stack[8];
int top = -1;      /* index of the current top element; -1 means empty */

/* Return 1 if the stack holds no elements, 0 otherwise. */
int isEmpty() {
    return top == -1;
}

/* Return 1 if the stack is at capacity, 0 otherwise.
 * BUG FIX: the original compared top == MAXSIZE, so a push at
 * top == MAXSIZE-1 was still allowed and wrote stack[MAXSIZE],
 * one element past the end of the array. */
int isFull() {
    return top >= MAXSIZE - 1;
}

/* Pop and return the top element.
 * Returns -1 (and prints a message) when the stack is empty; the original
 * fell off the end without returning a value, which is undefined behavior
 * because the caller uses the result. */
int pop() {
    if (!isEmpty()) {
        int data = stack[top];
        top = top - 1;
        return data;
    }
    printf("Could not retrieve data, Stack is empty.\n");
    return -1;
}

/* Push data onto the stack.
 * Returns 0 on success, -1 when full; the original was declared int but
 * returned nothing on any path (undefined behavior if the result is used). */
int push(int data) {
    if (!isFull()) {
        top = top + 1;
        stack[top] = data;
        return 0;
    }
    printf("Could not insert data, Stack is full.\n");
    return -1;
}

int main() {
    omp_set_dynamic(0);
    #pragma omp parallel num_threads(2)
    {
        /* BUG FIX: the thread id must be private to each thread.  The
         * original stored it in a variable shared by the whole team — a
         * data race that could route both threads into the same branch. */
        int id = omp_get_thread_num();
        if (id == 0) {
            /* Producer: read numbers from stdin and push them. */
            while (1) {
                #pragma omp critical
                {
                    if (!isFull()) {
                        int num;
                        printf("Enter a number to push\n");
                        /* check scanf so bad input does not push garbage */
                        if (scanf("%d", &num) == 1) {
                            push(num);
                        }
                    } else {
                        printf("Stack is full");
                    }
                    fgetc(stdin);   /* consume the trailing newline */
                }
            }
        } else {
            /* Consumer: pop and print values as they become available. */
            while (1) {
                #pragma omp critical
                {
                    if (!isEmpty()) {
                        printf("Deleted item = %d\n", pop());
                    } else {
                        printf("Stack is empty");
                    }
                    fgetc(stdin);
                }
            }
        }
    }
    return 0;
}
firstlastprivate.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/* Demo of firstprivate/lastprivate: each thread's private copy of the
 * accumulator starts from the value it had before the region (0), and
 * after the loop the copy from the last iteration is written back, so
 * the final value is only the last thread's partial sum — not the total. */
int main() {
  const int n = 7;
  int values[n];
  int acc = 0;

  for (int idx = 0; idx < n; idx++) {
    values[idx] = idx;
  }

  #pragma omp parallel for firstprivate(acc) lastprivate(acc)
  for (int idx = 0; idx < n; idx++) {
    acc += values[idx];
    printf(" thread %d suma a[%d] suma=%d \n", omp_get_thread_num(), idx, acc);
  }

  printf("\nFuera de la construcción parallel suma=%d\n", acc);
  return 0;
}
GB_binop__rdiv_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rdiv_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_01__rdiv_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__rdiv_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_03__rdiv_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rdiv_fp32)
// A*D function (colscale):         GB (_AxD__rdiv_fp32)
// D*A function (rowscale):         GB (_DxB__rdiv_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__rdiv_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__rdiv_fp32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rdiv_fp32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rdiv_fp32)
// C=scalar+B                       GB (_bind1st__rdiv_fp32)
// C=scalar+B'                      GB (_bind1st_tran__rdiv_fp32)
// C=A+scalar                       GB (_bind2nd__rdiv_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__rdiv_fp32)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = (bij / aij)
// note: rdiv is "reverse divide": rdiv(x,y) = y/x, hence (bij / aij) below.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB)  \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: note the reversed operands, z = rdiv(x,y) = y/x
#define GB_BINOP(z, x, y, i, j) \
    z = (y / x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_FP32 || GxB_NO_RDIV_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__rdiv_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // body supplied by the template, driven by the GB_* macros above
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rdiv_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rdiv_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated-code artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rdiv_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rdiv_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rdiv_fp32)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rdiv_fp32)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rdiv_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__rdiv_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rdiv_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rdiv_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = Bx [p] ;
        // cij = rdiv (x, bij) = bij / x
        Cx [p] = (bij / x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rdiv_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = Ax [p] ;
        // cij = rdiv (aij, y) = y / aij
        Cx [p] = (y / aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    float aij = Ax [pA] ;          \
    Cx [pC] = (aij / x) ;        \
}

GrB_Info GB (_bind1st_tran__rdiv_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore the macro for any code below this point
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    float aij = Ax [pA] ;          \
    Cx [pC] = (y / aij) ;        \
}

GrB_Info GB (_bind2nd_tran__rdiv_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gbdt.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_

#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>

#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>

#include "score_updater.hpp"

namespace LightGBM {

using json11::Json;

/*!
 * \brief GBDT algorithm implementation, including training, prediction and bagging.
 */
class GBDT : public GBDTBase {
 public:
  /*!
   * \brief Constructor
   */
  GBDT();

  /*!
   * \brief Destructor
   */
  ~GBDT();

  /*!
   * \brief Initialization logic
   * \param gbdt_config Config for boosting
   * \param train_data Training data
   * \param objective_function Training objective function
   * \param training_metrics Training metrics
   */
  void Init(const Config *gbdt_config, const Dataset *train_data,
            const ObjectiveFunction *objective_function,
            const std::vector<const Metric *> &training_metrics) override;

  /*!
   * \brief Merge model from other boosting object. Will insert to the front of current boosting object
   * \param other Boosting object to merge (assumed to actually be a GBDT — note the reinterpret_cast below)
   */
  void MergeFrom(const Boosting *other) override {
    auto other_gbdt = reinterpret_cast<const GBDT *>(other);
    // tmp move to other vector
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    // push model from other first
    for (const auto &tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // push model in current object
    for (const auto &tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  // Randomly permute the iterations in [start_iter, end_iter) with a fixed
  // seed (17), keeping the trees of one iteration together.
  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    // Fisher-Yates shuffle restricted to [start_iter, end_iter)
    Random tmp_rand(17);
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }

  /*!
   * \brief Reset the training data
   * \param train_data New Training data
   * \param objective_function Training objective function
   * \param training_metrics Training metrics
   */
  void ResetTrainingData(const Dataset *train_data,
                         const ObjectiveFunction *objective_function,
                         const std::vector<const Metric *> &training_metrics) override;

  /*!
   * \brief Reset Boosting Config
   * \param gbdt_config Config for boosting
   */
  void ResetConfig(const Config *gbdt_config) override;

  /*!
   * \brief Adding a validation dataset
   * \param valid_data Validation dataset
   * \param valid_metrics Metrics for validation dataset
   */
  void AddValidDataset(const Dataset *valid_data,
                       const std::vector<const Metric *> &valid_metrics) override;

  /*!
   * \brief Perform a full training procedure
   * \param snapshot_freq frequency of snapshot
   * \param model_output_path path of model file
   */
  void Train(int snapshot_freq, const std::string &model_output_path) override;

  void RefitTree(const std::vector<std::vector<int>> &tree_leaf_prediction) override;

  /*!
   * \brief Training logic
   * \param gradients nullptr for using default objective, otherwise use self-defined boosting
   * \param hessians nullptr for using default objective, otherwise use self-defined boosting
   * \return True if cannot train any more
   */
  bool TrainOneIter(const score_t *gradients, const score_t *hessians) override;

  /*!
   * \brief Rollback one iteration
   */
  void RollbackOneIter() override;

  /*!
   * \brief Get current iteration
   */
  int GetCurrentIteration() const override {
    return static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  /*!
   * \brief Can use early stopping for prediction or not
   * \return True if cannot use early stopping for prediction
   */
  bool NeedAccuratePrediction() const override {
    if (objective_function_ == nullptr) {
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
   * \brief Get evaluation result at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \return evaluation result
   */
  std::vector<double> GetEvalAt(int data_idx) const override;

  /*!
   * \brief Get current training score
   * \param out_len length of returned score
   * \return training score
   */
  const double *GetTrainingScore(int64_t *out_len) override;

  /*!
   * \brief Get size of prediction at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \return The size of prediction
   */
  int64_t GetNumPredictAt(int data_idx) const override {
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return num_data * num_class_;
  }

  /*!
   * \brief Get prediction result at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \param result used to store prediction result, should allocate memory before call this function
   * \param out_len length of returned score
   */
  void GetPredictAt(int data_idx, double *out_result, int64_t *out_len) override;

  /*!
   * \brief Get number of prediction for one data
   * \param start_iteration Start index of the iteration to predict
   * \param num_iteration number of used iterations
   * \param is_pred_leaf True if predicting leaf index
   * \param is_pred_contrib True if predicting feature contribution
   * \return number of prediction
   */
  inline int NumPredictOneRow(int start_iteration, int num_iteration,
                              bool is_pred_leaf, bool is_pred_contrib) const override {
    int num_pred_in_one_row = num_class_;
    if (is_pred_leaf) {
      int max_iteration = GetCurrentIteration();
      start_iteration = std::max(start_iteration, 0);
      start_iteration = std::min(start_iteration, max_iteration);
      if (num_iteration > 0) {
        num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
      } else {
        num_pred_in_one_row *= (max_iteration - start_iteration);
      }
    } else if (is_pred_contrib) {
      num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);  // +1 for 0-based indexing, +1 for baseline
    }
    return num_pred_in_one_row;
  }

  void PredictRaw(const double *features, double *output,
                  const PredictionEarlyStopInstance *earlyStop) const override;

  void PredictRawByMap(const std::unordered_map<int, double> &features, double *output,
                       const PredictionEarlyStopInstance *early_stop) const override;

  void Predict(const double *features, double *output,
               const PredictionEarlyStopInstance *earlyStop) const override;

  void PredictByMap(const std::unordered_map<int, double> &features, double *output,
                    const PredictionEarlyStopInstance *early_stop) const override;

  void PredictLeafIndex(const double *features, double *output) const override;

  void PredictLeafIndexByMap(const std::unordered_map<int, double> &features, double *output) const override;

  void PredictContrib(const double *features, double *output) const override;

  void PredictContribByMap(const std::unordered_map<int, double> &features,
                           std::vector<std::unordered_map<int, double>> *output) const override;

  /*!
   * \brief Dump model to json format string
   * \param start_iteration The model will be saved start from
   * \param num_iteration Number of iterations that want to dump, -1 means dump all
   * \param feature_importance_type Type of feature importance, 0: split, 1: gain
   * \return Json format string of model
   */
  std::string DumpModel(int start_iteration, int num_iteration, int feature_importance_type) const override;

  /*!
   * \brief Translate model to if-else statement
   * \param num_iteration Number of iterations that want to translate, -1 means translate all
   * \return if-else format codes of model
   */
  std::string ModelToIfElse(int num_iteration) const override;

  /*!
   * \brief Translate model to if-else statement
   * \param num_iteration Number of iterations that want to translate, -1 means translate all
   * \param filename Filename that want to save to
   * \return is_finish Is training finished or not
   */
  bool SaveModelToIfElse(int num_iteration, const char *filename) const override;

  /*!
   * \brief Save model to file
   * \param start_iteration The model will be saved start from
   * \param num_iterations Number of model that want to save, -1 means save all
   * \param feature_importance_type Type of feature importance, 0: split, 1: gain
   * \param filename Filename that want to save to
   * \return is_finish Is training finished or not
   */
  bool SaveModelToFile(int start_iteration, int num_iterations,
                       int feature_importance_type, const char *filename) const override;

  /*!
   * \brief Save model to string
   * \param start_iteration The model will be saved start from
   * \param num_iterations Number of model that want to save, -1 means save all
   * \param feature_importance_type Type of feature importance, 0: split, 1: gain
   * \return Non-empty string if succeeded
   */
  std::string SaveModelToString(int start_iteration, int num_iterations,
                                int feature_importance_type) const override;

  /*!
   * \brief Restore from a serialized buffer
   */
  bool LoadModelFromString(const char *buffer, size_t len) override;

  /*!
   * \brief Calculate feature importances
   * \param num_iteration Number of model that want to use for feature importance, -1 means use all
   * \param importance_type: 0 for split, 1 for gain
   * \return vector of feature_importance
   */
  std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;

  /*!
   * \brief Calculate upper bound value
   * \return upper bound value
   */
  double GetUpperBoundValue() const override;

  /*!
   * \brief Calculate lower bound value
   * \return lower bound value
   */
  double GetLowerBoundValue() const override;

  /*!
   * \brief Get max feature index of this model
   * \return Max feature index of this model
   */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }

  /*!
   * \brief Get feature names of this model
   * \return Feature names of this model
   */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

  /*!
   * \brief Get index of label column
   * \return index of label column
   */
  inline int LabelIdx() const override { return label_idx_; }

  /*!
   * \brief Get number of weak sub-models
   * \return Number of weak sub-models
   */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

  /*!
   * \brief Get number of tree per iteration
   * \return number of tree per iteration
   */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

  /*!
   * \brief Get number of classes
   * \return Number of classes
   */
  inline int NumberOfClasses() const override { return num_class_; }

  // Clamp the [start_iteration, num_iteration] window used by subsequent
  // Predict* calls; for contribution prediction, precompute tree depths.
  inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iteration = std::max(start_iteration, 0);
    start_iteration = std::min(start_iteration, num_iteration_for_pred_);
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
    } else {
      num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
    }
    start_iteration_for_pred_ = start_iteration;
    if (is_pred_contrib) {
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
   * \brief Get Type name of this boosting object
   */
  const char *SubModelName() const override { return "tree"; }

  bool IsLinear() const override { return linear_tree_; }

 protected:
  virtual bool GetIsConstHessian(const ObjectiveFunction *objective_function) {
    if (objective_function != nullptr) {
      return objective_function->IsConstantHessian();
    } else {
      return false;
    }
  }

  /*!
   * \brief Print eval result and check early stopping
   */
  virtual bool EvalAndCheckEarlyStopping();

  /*!
   * \brief reset config for bagging
   */
  void ResetBaggingConfig(const Config *config, bool is_change_dataset);

  /*!
   * \brief Implement bagging logic
   * \param iter Current iteration
   */
  virtual void Bagging(int iter);

  virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t *buffer);

  data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t *buffer);

  /*!
   * \brief calculate the object function
   */
  virtual void Boosting();

  /*!
   * \brief updating score after tree was trained
   * \param tree Trained tree of this iteration
   * \param cur_tree_id Current tree for multiclass training
   */
  virtual void UpdateScore(const Tree *tree, const int cur_tree_id);

  /*!
   * \brief eval results for one metric
   */
  virtual std::vector<double> EvalOneMetric(const Metric *metric, const double *score) const;

  /*!
   * \brief Print metric result of current iteration
   * \param iter Current iteration
   * \return best_msg if met early_stopping
   */
  std::string OutputMetric(int iter);

  double BoostFromAverage(int class_id, bool update_scorer);

  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset *train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction *objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric *> training_metrics_;
  /*! \brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric *>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Only use first metric for early stopping */
  bool es_first_metric_only_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*!
  * potential trees
  * First index means the index of tree
  * Second index means the index of node
  * Third index means the index of potential tree pointer
  */
  std::vector<std::vector<std::vector<Tree>>> potential_models_;
  /*! \brief Max feature index of training data*/
  int max_feature_idx_;

#ifdef USE_CUDA
  /*! \brief First order derivative of training data */
  std::vector<score_t, CHAllocator<score_t>> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
  /*! \brief First order derivative of training data */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif

  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iterations */
  int num_tree_per_iteration_;
  /*! \brief Number of class */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Start iteration of used model */
  int start_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  std::vector<std::string> feature_infos_;
  std::unique_ptr<Dataset> tmp_subset_;
  bool is_use_subset_;
  std::vector<bool> class_need_train_;
  bool is_constant_hessian_;
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  bool average_output_;
  bool need_re_bagging_;
  bool balanced_bagging_;
  std::string loaded_parameter_;
  std::vector<int8_t> monotone_constraints_;
  const int bagging_rand_block_ = 1024;
  std::vector<Random> bagging_rands_;
  ParallelPartitionRunner<data_size_t, false> bagging_runner_;
  Json forced_splits_json_;
  bool linear_tree_;
};

}  // namespace LightGBM
#endif  // LIGHTGBM_BOOSTING_GBDT_H_
GB_unaryop__lnot_bool_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_int64 // op(A') function: GB_tran__lnot_bool_int64 // C type: bool // A type: int64_t // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_int64 ( bool *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ques14.c
#include<stdio.h>
#include<omp.h>
#include<math.h>

/*
 * Parallel Sieve of Eratosthenes.
 *
 * Reads an upper bound n from stdin and prints every prime in [2, n],
 * one per line.  arr[i] == 1 marks i as composite; the inner marking
 * loop for each prime i is parallelized with OpenMP (iterations are
 * independent: they only ever write the same value 1).
 *
 * Fixes over the original:
 *  - `main()` with implicit int is invalid since C99; use `int main(void)`.
 *  - arr has 1000 slots but n was never validated, so n >= 1000 caused
 *    out-of-bounds writes (undefined behavior); n is now range-checked.
 *  - scanf's return value is checked (garbage input left n uninitialized).
 *  - sqrt(n) is hoisted out of the loop condition instead of being
 *    re-evaluated every iteration.
 *
 * Returns 0 on success, 1 on invalid input.
 */
int main(void)
{
    int n, i, j;
    int arr[1000] = {0};   /* 0 = (potentially) prime, 1 = composite */

    printf("Enter n\n");
    if (scanf("%d", &n) != 1 || n < 2) {
        fprintf(stderr, "invalid input: expected an integer >= 2\n");
        return 1;
    }
    if (n > 999) {   /* guard: indices up to n must fit in arr[1000] */
        fprintf(stderr, "n must be at most 999\n");
        return 1;
    }

    const int limit = (int)sqrt((double)n);   /* hoisted loop bound */
    for (i = 2; i <= limit; i++) {
        if (arr[i] == 0) {
            /* cross off multiples of prime i, starting at i*i */
            #pragma omp parallel for
            for (j = i * i; j <= n; j = j + i)
                arr[j] = 1;
        }
    }

    for (i = 2; i <= n; i++)
        if (arr[i] == 0)
            printf("%d\n", i);

    return 0;
}
GB_unop__log2_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log2_fp64_fp64)
// op(A') function:  GB (_unop_tran__log2_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = log2 (aij)

// input (A) entry type
#define GB_ATYPE \
    double

// output (C) entry type
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: base-2 logarithm
#define GB_OP(z, x)   \
    z = log2 (x) ;

// casting (identity: both sides are double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij): full per-entry kernel used by the transpose template
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = log2 (z) ;            \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG2 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply cij = log2 (aij) to every entry of Ax, writing into Cx.
// Two cases: full/sparse (Ab == NULL, all anz slots hold entries) and
// bitmap (Ab [p] selects which slots are live).
// Returns GrB_NO_VALUE when the operator is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__log2_fp64_fp64)
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every slot holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log2 (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log2 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose+apply; the loop lives in the shared GB_unop_transpose.c
// template, specialized via the GB_* macros above.
GrB_Info GB (_unop_tran__log2_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace offsets
    const int64_t *restrict A_slice,    // how A is partitioned
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
coloring_dense_sparse.h
#ifndef COLORING_DENSE_SPARSE_H_
#define COLORING_DENSE_SPARSE_H_

#include <omp.h>
#include <cassert>
#include <random>

#include "gms/third_party/gapbs/benchmark.h"
#include "gms/third_party/gapbs/builder.h"
#include "gms/third_party/gapbs/command_line.h"
#include "gms/third_party/gapbs/graph.h"
#include "coloring_common.h"
#include "random_select.h"
#include "coloring_barenboim.h"

namespace GMS::Coloring {

typedef NodeId ComponentID;

// Graph coloring via a dense/sparse decomposition: vertices with many
// "friend edges" (edges whose endpoints share many neighbors) are grouped
// into dense components and colored component-wise; see the referenced
// paper's constants K, C, epsilon below.
template <class CGraph>
class Coloring_Dense_Sparse {
private:
  // in - out:
  CGraph &g;
  std::vector<ColorID> &coloring;
  // const AlgoParameters& algoParams;
  // constants from paper
  const double K = 100; // sufficiently large (hope is big enough !?)
  const double C = 1.0/K/6.0; // small >0, 6C at most 1/K
  double epsilon;
  double alpha = 0.01; // initial coloring probability
  // our own constants
  double friendBeta = 0.01; // prob: if check for friendEdge
  double friendBetaThreashold = 0.5; // for density
  // local states:
  DetailTimer detailTimer;
  int64_t n;                 // number of vertices in g
  int64_t delta;             // maximum degree
  int64_t deltaM;            // minimum degree
  int64_t min1Delta;         // max(1, delta), avoids zero in derived formulas
  double friendNumberPaper;
  double friendNumber;       // shared-neighbor threshold actually applied
  // basically copy of g, but only has friend edges
  std::vector<uint64_t> friendGraphOffset;
  std::vector<NodeId> friendGraph;
  std::vector<NodeId> friendEdgeCounter;
  // denseGraph
  const NodeId invalidNode = -1;
  NodeId nDense;
  std::vector<NodeId> denseToGraph; // v -> vDense || invalidNode
  std::vector<NodeId> graphToDense; // vDense -> v
  std::vector<NodeId> denseGraph; // Adjacency list format
  // uses same vector as friendGraph (swapped at some point)
  std::vector<NodeId> denseGraphEdgeIndex; // offset for vDense in denseGraph
  std::vector<NodeId> denseGraphEdgeCount; // offset for vDense in denseGraph
  // dense Components
  std::vector<ComponentID> denseComponent; // vDense -> componentID
  ComponentID nComponent;
  std::vector<NodeId> componentMembers;
  std::vector<NodeId> componentCount; // componentID -> count, decreases over time as get colored
  std::vector<NodeId> componentMembersOffset; // componentID -> where list starts

public:
  // Constructor: records the graph and output coloring vector, sorts
  // neighborhoods (required by the merge-based friend-edge test), and
  // derives delta/deltaM/epsilon/friendNumber from the graph.
  Coloring_Dense_Sparse(CGraph& _g, std::vector<ColorID> &_coloring)
    : g(_g), coloring(_coloring) {
    n = g.num_nodes();
    // need sorted neighborhoods for friend-edge finding
    sort_graph_neighborhoods(g);
    assert(is_sorted(g));
    // min/max degree in one parallel pass
    int64_t delta_ = 0;
    int64_t deltaM_ = n;
    #pragma omp parallel for reduction(max:delta_) reduction(min:deltaM_)
    for(NodeId v=0;v<n;v++) {
      assert(g.out_degree(v) == g.in_degree(v));
      delta_ = std::max(delta_,g.out_degree(v));
      deltaM_ = std::min(deltaM_,g.out_degree(v));
    }
    delta = delta_;
    deltaM = deltaM_;
    // std::cout << "-- Constants and Graph properties:" << std::endl;
    // std::cout << "degree: max " << delta << ", min " << deltaM << std::endl;
    // constants from paper
    epsilon = C * std::pow(100.0,-std::sqrt(std::log((double)delta)));
    // std::cout << "C: " << C << std::endl;
    // std::cout << "K: " << K << std::endl;
    // std::cout << "epsilon from paper: " << epsilon << std::endl;
    // reflection about this condition below:
    const double conditionFraction = std::pow(epsilon,4)*delta / (K*std::log(n));
    if( conditionFraction < 1.0 ) {
      // std::cout << "K: condition not satisfied, just run [9] Barenboim, according to paper." << std::endl;
      // std::cout << "K: condition off by factor " << conditionFraction << std::endl;
    } else {
      // std::cout << "K: assumption on K is satisfied, algo makes sense here." << std::endl;
    }
    min1Delta = std::max(1l,delta);
    friendNumberPaper = (1.0 - epsilon) * delta;
    // std::cout << "friendNumber from paper: " << friendNumberPaper << std::endl;
    // std::cout << "-- Parameters (set with -p 'n1=v1,n2=v2'):" << std::endl;
    double epsilonApplied = epsilon;//algoParams.getDouble("epsilon",epsilon);
    // std::cout << "epsilon applied: " << epsilonApplied << std::endl;
    // if(epsilonApplied > 0.2) {std::cout << " WARNING: epsilon should not be higher than 0.2, according to paper!" << std::endl;}
    assert(epsilonApplied > 0);
    friendNumber = std::ceil((1.0 - epsilonApplied)*min1Delta); // epsilon <= 1/5 for small dense component diameters
    // std::cout << "friendNumber applied: " << friendNumber << " (derived)" << std::endl;
    alpha = 0.01;//algoParams.getDouble("alpha",0.01);
    // std::cout << "alpha: " << alpha << " (initial coloring prob)" << std::endl;
    friendBeta = 1.0;//algoParams.getDouble("beta",1.0);
    friendBetaThreashold = 1.0;//algoParams.getDouble("betaT",1.0);
    // std::cout << "beta: " << friendBeta << " (prob to check edge if friend-edge)"<< std::endl;
    // std::cout << "betaT: " << friendBetaThreashold << " (beta threashold)" << std::endl;
    // std::cout << "--" << std::endl;
    // detailTimer.endPhase("init");
  }

  // Driver: find friend edges, then (if any dense vertices exist) build
  // the dense subgraph and partition it into components.
  void decomposition() {
    decomposition_friend_edges();
    if(nDense>0) {
      decomposition_dense_graph();
      decomposition_components();
    }
  }

  // Phase 1: mark "friend edges" (edges whose endpoints share at least
  // friendNumber neighbors) and classify vertices with enough friend
  // edges as dense, assigning them dense IDs via graphToDense.
  void decomposition_friend_edges() {
    friendGraphOffset.resize(n+1,0);
    // CSR-style offsets mirroring g's adjacency layout
    #pragma omp parallel for
    for(uint64_t i=0; i<n; i++) {
      friendGraphOffset[i] = g.out_neigh(i).begin() - g.out_neigh(0).begin();
    }
    friendGraphOffset[n] = g.num_edges()*2;
    friendGraph.resize(friendGraphOffset[n]);
    friendEdgeCounter.resize(n,0);
    // Idea: could do graphToDense via settin 1, prefixsum
    // remove fetch_and_add, maybe faster?
    graphToDense.resize(n,invalidNode);
    nDense = 0;
    // Idea: dynamic, so that better balance?
    //   was true, especially for degree_ordered graphs
    //   seems not to have had performance impact elsewhere
    // other Idea:
    //   half time by only considering edges from low->high.
    //   Then find other edges for high ID later
    //   - or: just do atomically!
    //   - or: write 0/1 into friendEdge, write 1 if want edge.
    //   - must write into others friendEdge, so some cache issues, but could work out.
    // other idea:
    //   combine component finding and friend edges?
    //   somehow collect almost cliques?
    //
    //   - when getting friend edge: continue around it (already know shared neighbors)
    //   - hmm not sure can save
    //   - since need many friendEdges to be dense, overlap large
    //   - assume you are leader of comp. find what is yours (diam 2 neighbors).
    //   - take all in 2 neighborhood (max delta squared)
    //   - perform friend edge tests there
    //   - if find smaller vertex in 2 neighborhood:
    //     - must see if in same component?
    //   - check if I am even dense? - relies on lots of friend edges...
    //   - check number in 2-neighborhood vs sum over number of neighbors of neighbors?
    //     - could this give me if I am dense? if small N, but large S, know strongly interconnected
    //     - how find N efficiently?
    //   - vote for edges somehow? is friend edge if enough votes?
    //   - go over triangles? - probably same time issues?
    // new Idea: sub-sample: toss coin if even consider edge
    //   when consider dense?
    //   if has friendBeta*friendNumber friendEdges?
    //   - maybe slightly less bc variance.
    #pragma omp parallel
    {
      // per-thread RNG for the friendBeta edge-sampling coin
      std::mt19937 rng(omp_get_thread_num());
      std::bernoulli_distribution coin(friendBeta);
      #pragma omp for schedule(dynamic,8)
      for (NodeId v = 0; v < n; v++) {
        [[maybe_unused]] NodeId lastU = 0;
        if(g.out_degree(v)>=friendNumber) {
          auto N = g.out_neigh(v);
          for(NodeId* it = N.begin(); it<N.end(); it++) {
            const NodeId u = *it;
            // test each edge once (v<u); both endpoints need enough degree
            if(v<u && g.out_degree(u)>=friendNumber && coin(rng)) {
              assert(lastU<=u); // guard against multiple edges
              lastU = u+1;
              // merge-intersect the two sorted neighborhoods
              NodeId* uNit = g.out_neigh(u).begin();
              NodeId* vNit = g.out_neigh(v).begin();
              NodeId* uNbeg = uNit;
              const NodeId* uNend = g.out_neigh(u).end();
              const NodeId* vNend = g.out_neigh(v).end();
              size_t sharedNeighbors = 0;
              // Idea: abort as soon as cannot have enough shared.
              while(uNit < uNend && vNit < vNend) {
                if(*uNit == *vNit) { // match
                  sharedNeighbors++;
                  uNit++;vNit++;
                } else if (*uNit < *vNit) {
                  uNit++;
                } else {
                  vNit++;
                }
              }
              if(sharedNeighbors >= friendNumber) {
                // mark edge as to keep:
                // flags are written at the edge's slot in BOTH endpoints'
                // adjacency ranges (hence the write into u's range too)
                const uint64_t uIndex = it - N.begin();// where is u in Nv
                NodeId* uIt = uNbeg;
                while(*uIt != v) {uIt++;}
                const uint64_t vIndex = uIt - uNbeg;// where is v in Nu
                friendGraph[friendGraphOffset[u] + vIndex] = 1;
                friendGraph[friendGraphOffset[v] + uIndex] = 1;
              }
            }
          }
        }
      }// for
    }// omp parallel
    // now scan and transform, cound friend edges:
    // compact the 0/1 flags into a list of friend-neighbor IDs per vertex
    #pragma omp parallel for schedule(dynamic,8)
    for (NodeId v = 0; v < n; v++) {
      if(g.out_degree(v)>=friendNumber) {
        auto N = g.out_neigh(v);
        NodeId* feBegin = &friendGraph[friendGraphOffset[v]];
        NodeId* feIt = feBegin;
        NodeId* feWrite = feBegin;
        for(NodeId* it = N.begin(); it<N.end(); it++) {
          const NodeId u = *it;
          if(*feIt) {
            *(feWrite++) = *it; // write u into friendGraph at next pos
          }
          feIt++;
        }
        NodeId feCount = feWrite - feBegin; // how many were written
        if(feCount>=friendNumber*friendBeta*friendBetaThreashold){
          friendEdgeCounter[v] = feCount;
          const NodeId vDense = fetch_and_add(nDense,1);//atomic
          graphToDense[v] = vDense;
          // Idea: if fetch_and_add too expensive:
          //   could do prefix sum over graphToDense
        }
      }
    }
    // std::cout << "nDense: " << nDense << std::endl;
    // detailTimer.endPhase("decomp_friend_edge");
  }

  // Phase 2: condense the friend graph onto the dense vertices only,
  // rewriting neighbor IDs from graph space into dense-ID space.
  void decomposition_dense_graph() {
    denseToGraph.resize(nDense);
    // take friendGraph, condense to denseGraph
    // keep only dense edges, write vDense instead of v.
    #pragma omp parallel for
    for (NodeId v = 0; v < n; v++) {
      NodeId vDense = graphToDense[v];
      if(vDense!=invalidNode) {
        denseToGraph[vDense] = v;
        //std::cout << vDense << " " << v << std::endl;
        // only keep friend edges to dense vertices, write as such:
        const int64_t vOffsetBegin = friendGraphOffset[v];
        const int64_t vOffsetEnd = friendGraphOffset[v]+friendEdgeCounter[v];
        std::sort(friendGraph.begin()+vOffsetBegin, friendGraph.begin()+vOffsetEnd); // sort adjacencies
        // in-place compaction: dense neighbors overwrite the front of v's range
        NodeId nWrite = 0;
        for(int64_t o = vOffsetBegin; o<vOffsetEnd; o++) {
          NodeId u = friendGraph[o];
          NodeId uDense = graphToDense[u];
          if(uDense!=invalidNode) {
            friendGraph[vOffsetBegin+nWrite++] = uDense;
            //std::cout << "  " << u << " " << uDense << std::endl;
          }
        }
        friendEdgeCounter[v] = nWrite;
      }
    }
    denseGraphEdgeIndex.resize(nDense);
    denseGraphEdgeCount.resize(nDense);
    #pragma omp parallel for
    for(NodeId vDense = 0; vDense < nDense; vDense++){
      NodeId v = denseToGraph[vDense];
      denseGraphEdgeIndex[vDense] = friendGraphOffset[v];
      denseGraphEdgeCount[vDense] = friendEdgeCounter[v];
    }
    std::swap(denseGraph,friendGraph); // now transformation has happened
    // Idea: could free up mem for:
    //  friendGraphOffset
    //  friendEdgeCounter
    // debug print
    //for(NodeId vDense = 0; vDense < nDense; vDense++){
    //  NodeId v = denseToGraph[vDense];
    //  const int64_t vOffsetBegin = denseGraphEdgeIndex[vDense];
    //  const int64_t vOffsetEnd = vOffsetBegin + denseGraphEdgeCount[vDense];
    //  std::cout << vDense << " " << v << std::endl;
    //  for(int64_t o = vOffsetBegin; o<vOffsetEnd; o++) {
    //    NodeId uDense = denseGraph[o];
    //    NodeId u = denseToGraph[uDense];
    //    std::cout << "  " << uDense << " " << u << std::endl;
    //  }
    //}
    // detailTimer.endPhase("decomp_dense_graph");
  }

  // Phase 3: partition dense vertices into connected components of the
  // dense graph.  Each vertex runs a local BFS and aborts as soon as it
  // sees a smaller dense ID, so exactly the minimum-ID vertex of each
  // component survives as its "leader" and labels all members.
  void decomposition_components() {
    assert(nDense > 0);
    denseComponent.resize(nDense);
    NodeId iComponent = 0; // acquire new id here
    // just make big, later adjust
    componentCount.resize(nDense);
    #pragma omp parallel // num_threads(1)
    {
      std::vector<NodeId> bfsQueue(nDense,invalidNode); // local
      std::vector<NodeId> bfsVisited(nDense,invalidNode); // local
      // Idea: make dynamic, small block size because all prob small ?
      #pragma omp for
      for(NodeId vDense = 0; vDense < nDense; vDense++){
        NodeId bfsInsert = 0;
        NodeId bfsProcess = 0;
        bfsQueue[bfsInsert++] = vDense; // push first
        bfsVisited[vDense] = vDense;
        bool doAbort = false;
        int64_t nEdges = 0;
        while((! doAbort) && bfsProcess < bfsInsert) {
          const NodeId wDense = bfsQueue[bfsProcess++];//take next
          // visit it's neighbors
          const int64_t uOffsetBegin = denseGraphEdgeIndex[wDense];
          const int64_t uOffsetEnd = uOffsetBegin + denseGraphEdgeCount[wDense];
          nEdges += denseGraphEdgeCount[wDense];
          for(uint64_t o = uOffsetBegin; o < uOffsetEnd && (!doAbort); o++) {
            const NodeId uDense = denseGraph[o];
            if(uDense < vDense) {
              // abort, found a smaller index
              doAbort = true;
            }
            if(uDense > vDense && bfsVisited[uDense]!=vDense) {
              // not yet visited by vDense
              bfsQueue[bfsInsert++] = uDense; // push new one
              bfsVisited[uDense] = vDense; // mark as visited by vDense
            }
          }
        }
        if(!doAbort) {
          // this vertex is the smallest ID in its component -> leader
          ComponentID componentID = fetch_and_add(iComponent,1);
          componentCount[componentID] = bfsInsert;
          // std::cout << componentID<< " I am leader: " << vDense << " " << denseToGraph[vDense]
          //   << " size: " << bfsInsert << " edges: " << nEdges << std::endl;
          for(uint64_t i = 0; i<bfsInsert; i++) {
            denseComponent[bfsQueue[i]] = componentID;
            //std::cout << "member: " << bfsQueue[i] << std::endl;
          }
        }
      }
    }
    nComponent = iComponent;
    // std::cout << "nComponents: " << nComponent << std::endl;
    componentMembersOffset.resize(nComponent);
    // Idea: prefix sum - also for inside comp
    componentMembersOffset[0] = 0;
    for(ComponentID comp = 1; comp < nComponent; comp++){
      componentMembersOffset[comp] = componentMembersOffset[comp-1] + componentCount[comp-1];
    }
    // memory preservation trick below:
    // componentMembers takes over the old componentCount storage (size
    // nDense) while the freshly sized vector becomes the new count array
    componentMembers.resize(nComponent);
    std::swap(componentMembers,componentCount);
    #pragma omp parallel for
    for(ComponentID comp = 0; comp < nComponent; comp++){
      componentCount[comp] = componentMembers[comp];
    }
    // just for debugging:
    #pragma omp parallel for
    for(int64_t i = 0; i<nDense; i++) {
      componentMembers[i] = -1;
    }
    // Idea: prefix sum - together with comp-offset
    #pragma omp parallel for
    for(ComponentID comp = 0; comp < nComponent; comp++){
      uint64_t next = componentMembersOffset[comp];
      for(NodeId vDense = 0; vDense < nDense; vDense++){
        if(denseComponent[vDense] == comp) {
          componentMembers[next++] = vDense;
        }
      }
      assert(next <= componentMembersOffset[comp] + componentCount[comp]);
      componentCount[comp] = next - componentMembersOffset[comp];
    }
    // validate result:
    assert(true);
    #pragma omp parallel for
    for(NodeId comp = 0; comp < nComponent; comp++) {
      const NodeId compSize = componentCount[comp];
      assert(compSize>0);
      const uint64_t compOffsetBegin = componentMembersOffset[comp];
      const uint64_t compOffsetEnd = compOffsetBegin + compSize;
      for(uint64_t o = compOffsetBegin; o<compOffsetEnd; o++) {
        const NodeId vD = componentMembers[o];
        // if(denseComponent[vD] != comp) {
        //   std::cout << "validate1 comp: " << comp << " vD: " << vD << " realComp: " << denseComponent[vD] << std::endl;
        //   std::cout << "b " << compOffsetBegin << " o " << o << " e " << compOffsetEnd << std::endl;
        // }
        // assert(denseComponent[vD] == comp);
      }
    }
    // reinsert friendEdges between v of same comp
    // - dropped bc friendBeta sampling
    #pragma omp parallel for
    for(NodeId vDense = 0; vDense < nDense; vDense++){
      NodeId v = denseToGraph[vDense];
      ComponentID comp = denseComponent[vDense];
      NodeId* beginIt = &denseGraph[denseGraphEdgeIndex[vDense]];
      NodeId* writeIt = beginIt;
      for(auto u : g.out_neigh(v)) {
        NodeId uDense = graphToDense[u];
        if(uDense!=invalidNode) {
          ComponentID uComp = denseComponent[uDense];
          if(comp==uComp) {
            *(writeIt++) = uDense;
            //std::cout << "edge recovered: " << vDense << " " << uDense << std::endl;
          }
        }
      }
      //std::cout << "from " << denseGraphEdgeCount[vDense] << " to "
      //  << (writeIt - beginIt) << std::endl;
      denseGraphEdgeCount[vDense] = writeIt - beginIt;
    }
    //// debug print
    //std::cout << "Debug Print componentns:" << std::endl;
    //for(ComponentID comp = 0; comp < nComponent; comp++){
    //  int64_t begin = componentMembersOffset[comp];
    //  for(int64_t o = begin; o < begin + componentCount[comp]; o++){
    //    NodeId vDense = componentMembers[o];
    //    std::cout << comp << " " << vDense << std::endl;
    //  }
    //}
    // detailTimer.endPhase("decomp_components");
  }

  // Randomized initial coloring: every vertex tries a random color from
  // [1, delta+1] with probability alpha, then keeps it only if no
  // neighbor already holds that color; colored dense vertices are then
  // compacted out of their components' member lists.
  void initial_coloring() {
    // Assume that palette at beginning is [Delta+1] for all vertices
    // with probability alpha = 0.01, pick a random color. Else keep color 0
    // - conflict resolution: keep color if no other wants the same. Else set back to 0
    if(nDense == 0){return;}
    std::vector<ColorID> tmpColoring(n,0);
    uint64_t successes = 0;
    // Idea: dynamic for better balance?
    #pragma omp parallel reduction(+:successes)
    {
      std::mt19937 rng(omp_get_thread_num());
      std::uniform_int_distribution<NodeId> udist(1, delta+1);
      std::bernoulli_distribution shouldColor(alpha);
      // tentative coloring round
      #pragma omp for
      for (NodeId v = 0; v < n; v++) {
        if(shouldColor(rng)) {
          tmpColoring[v] = udist(rng);
        }
      }
      // conflict resolution round
      // NOTE(review): the check below reads the committed `coloring[u]`,
      // not `tmpColoring[u]` — two neighbors picking the same tentative
      // color in this round could both commit; verify against the paper.
      #pragma omp for
      for (NodeId v = 0; v < n; v++) {
        ColorID c = tmpColoring[v];
        if(c>0) {
          bool keepColor = true;
          for (NodeId u : g.out_neigh(v)) {
            if(coloring[u] == c) {
              keepColor = false;
              break;
            }
          }
          if(keepColor) {
            coloring[v] = c;
            successes++;
            //std::cout << "init col: " << v << " " << graphToDense[v] << std::endl;
          } // commit if no conflict
        }
      }
    }
    // std::cout << "init col successes: " << successes << std::endl;
    // removal of colored component members:
    #pragma omp parallel for
    for(NodeId comp = 0; comp < nComponent; comp++) {
      const NodeId compSize = componentCount[comp];
      assert(compSize>0);
      const NodeId compOffsetBegin = componentMembersOffset[comp];
      const NodeId compOffsetEnd = compOffsetBegin + compSize;
      NodeId writeIndex = 0;
      for(NodeId o = compOffsetBegin; o<compOffsetEnd; o++) {
        const NodeId vD = componentMembers[o];
        // if(denseComponent[vD] != comp) {
        //   std::cout << "comp: " << comp << " vD: " << vD << " realComp: " << denseComponent[vD] << std::endl;
        // }
        assert(denseComponent[vD] == comp);
        const NodeId v = denseToGraph[vD];
        const ColorID vCol = coloring[v];
        if(vCol == 0) { // keep (else overwrite)
          componentMembers[compOffsetBegin + writeIndex++] = vD;
        }
      }
      componentCount[comp] = writeIndex; // number of kept members
    }
    // detailTimer.endPhase("init_coloring");
  }

  // Main dense coloring loop: O(log delta) rounds; each round every
  // component tentatively colors L randomly chosen members from their
  // palettes, resolves cross-component conflicts, commits, and updates
  // degrees/palettes/member lists.
  void coloring_steps() {
    if(nDense == 0){return;}
    // - need external degree (degree of edges to outside component) - dBar(v)i - at most: epsilon delta
    // - need anti-degree (non-neighbors of v in same component) a(v)i - at most: 3 epsilon delta
    // - Di >= than both
    // - Zi at most palette size of dense vertices
    const uint64_t nStepsDenseColoring = std::ceil(std::log((double) delta));
    // std::cout << "nStepsDenseColoring: " << nStepsDenseColoring << std::endl;
    // must use color palettes:
    //   need easy random select, but also easy removal.
    //   could just delete (mark deleted, decr if needed)
    //   cleanup on next select, keep boundary up to which clean?
    //   or cheaper to just move all? - for now just do this.
    //   but find is not cheap (well log, so ok)
    std::vector<std::vector<ColorID>> palettes(nDense);
    std::vector<NodeId> externalDegree(nDense,0);
    std::vector<NodeId> internalDegree(nDense,0);
    // anti-degree: component_size - internalDegree
    // initialize palettes to [1, delta+1] and count each dense vertex's
    // uncolored internal/external neighbors; colored neighbors shrink the palette
    #pragma omp parallel for
    for(NodeId vD = 0; vD < nDense; vD++){
      palettes[vD].resize(delta+1);
      std::generate(palettes[vD].begin(), palettes[vD].end(), [c = 1]() mutable { return c++; });
      const NodeId v = denseToGraph[vD];
      const NodeId vComp = denseComponent[vD];
      for (NodeId u : g.out_neigh(v)) {
        const ColorID uColor = coloring[u];
        if(uColor == 0) {// note still uncolored
          // count as neighbor
          const NodeId uD = graphToDense[u];
          if(uD == invalidNode) {
            // u is sparse node
            externalDegree[vD]++;
            //std::cout << "init eD++ " << vD << " " << uD << std::endl;
          } else {
            // u is dense node
            const NodeId uComp = denseComponent[uD];
            if(vComp == uComp) {
              internalDegree[vD]++;
              //std::cout << "init iD++ " << vD << " " << uD << std::endl;
            } else {
              externalDegree[vD]++;
              //std::cout << "init eD++ " << vD << " " << uD << std::endl;
            }
          }
        } else {
          // update color palette
          auto palette_v_end = std::remove(palettes[vD].begin(), palettes[vD].end(), uColor);
          palettes[vD].resize(std::distance(palettes[vD].begin(), palette_v_end));
        }
      }
      //std::cout << "prep dense: " << vD << " eD: " << externalDegree[vD] << " iD: " << internalDegree[vD] << " pal: " << palettes[vD].size() << std::endl;
    }
    // detailTimer.endPhase("coloring_steps_prep");
    for(int i = 0; i< nStepsDenseColoring; i++) {
      // std::cout << "Starting next dense coloring round: " << i << std::endl;
      // for each component
      // - pick L vertices at random
      // - color with random color from palette
      // - if conflict with color from other component, keep only color of smaller componentID
      std::vector<ColorID> tmpColoring(nDense,0);
      // what to do about random accesses? use same order as in componentMembers - could change...but so what, only relevant for this round!
      // set after conflict resolution and before commit
      std::vector<ColorID> commitColoring(nDense,0);
      #pragma omp parallel
      {
        // per-thread, per-round RNG seed (stride keeps seeds distinct across rounds)
        int stride = std::max(omp_get_num_threads(), (int)nStepsDenseColoring);
        std::mt19937 rng(omp_get_thread_num()*stride + i);
        // IDEA: make dynamic!
        #pragma omp for schedule(dynamic,8)
        for(NodeId comp = 0; comp < nComponent; comp++) {
          // find Di and Zi
          // Di: max over members of max(external degree, anti-degree);
          // Zi: min palette size in the component
          NodeId Di = 1;
          NodeId Zi = delta;
          const NodeId compSize = componentCount[comp];
          if(compSize == 0) {continue;}
          assert(compSize>0);
          const int64_t compOffsetBegin = componentMembersOffset[comp];
          const int64_t compOffsetEnd = compOffsetBegin + compSize;
          for(int64_t o = compOffsetBegin; o<compOffsetEnd; o++) {
            const NodeId vD = componentMembers[o];
            Di = std::max(Di, std::max(externalDegree[vD] , componentCount[comp]-internalDegree[vD]));
            const NodeId Qiv = palettes[vD].size();
            Zi = std::min(Zi, Qiv);
            // if(denseComponent[vD] != comp) {
            //   std::cout << "comp: " << comp << " vD: " << vD << " realComp: " << denseComponent[vD] << std::endl;
            // }
            assert(denseComponent[vD] == comp);
          }
          const double DbyZ = (double)Di / (double)Zi;
          const double ZbyD = (double)Zi / (double)Di;
          const NodeId L = std::ceil((double)componentCount[comp] * (1.0-2.0*DbyZ*std::log(ZbyD)));
          //std::cout << "comp: " << comp << " Size: " << componentCount[comp] << " Di: " << Di << " Zi: " << Zi << " L: " << L << std::endl;
          //std::cout << "CompInit: " << comp << " size: " << compSize << " Di: " << Di
          //  << " Zi: " << Zi << " L: " << L << std::endl;
          if(Di == 0 || Zi == 0 || L == 0 || compSize == 0) {
            for(int64_t o = compOffsetBegin; o<compOffsetEnd; o++) {
              const NodeId vD = componentMembers[o];
              // std::cout << "vD: " << vD << " eD: " << externalDegree[vD]
              //   << " iD: " << internalDegree[vD] << std::endl;
            }
          }
          assert(Di > 0);
          assert(Zi > 0);
          assert(L > 0 && L <= compSize);
          // select L from component
          std::vector<NodeId> permutation(componentCount[comp]);
          std::generate(permutation.begin(), permutation.end(), [c = 0]() mutable { return c++; });
          std::shuffle(permutation.begin(),permutation.end(),rng);
          for(NodeId i = 0; i<L; i++) {
            NodeId pi = permutation[i];
            const NodeId o = componentMembersOffset[comp] + pi;
            const NodeId vD = componentMembers[o];
            // pick random color from palette, but cannot be one of neighbor just chosen.
            bool retry = true;
            while(retry) {
              retry = false;
              const NodeId palIndex = std::uniform_int_distribution<NodeId>{0, palettes[vD].size()-1}(rng);
              const ColorID cCandidate = palettes[vD][palIndex];
              // for all neighbors in comp:
              const uint64_t index_begin = denseGraphEdgeIndex[vD];
              const uint64_t index_end = index_begin + denseGraphEdgeCount[vD];
              for(uint64_t uOffset = index_begin; uOffset < index_end && (! retry); uOffset++) {
                const NodeId uD = denseGraph[uOffset];
                //const NodeId u = denseToGraph[uD];
                assert(denseComponent[uD]==denseComponent[vD]);
                //const uCol = coloring[u];
                // NOTE(review): this reads tmpColoring[vD] (vD's own slot),
                // not tmpColoring[uD] — the neighbor's tentative color is
                // never consulted; looks like a typo, verify intent.
                const ColorID uColTmp = tmpColoring[vD];
                if(cCandidate==uColTmp) {
                  // reject cCandidate
                  retry = true;
                }
              }
              if(!retry) {tmpColoring[vD] = cCandidate;}
            }
          }
        }
        // validate tmp colors, only take if no other vertex has same (only possible if from other component)
        // would it make sense to store these edges in dedicated structure?
        #pragma omp for schedule(dynamic,8)
        for(NodeId vD = 0; vD < nDense; vD++) {
          const NodeId v = denseToGraph[vD];
          const ColorID cTmp = tmpColoring[vD];
          if(cTmp!=0) {
            // for all that have just been tmp colored
            const NodeId v = denseToGraph[vD];
            bool hasConflict = false;
            for (NodeId u : g.out_neigh(v)) {
              const NodeId uD = graphToDense[u];
              if(uD != invalidNode) {// u is dense
                // tie-break: the smaller vertex ID keeps the color
                if(tmpColoring[uD] == cTmp && v > u) {hasConflict = true;}
              }
            }
            if(hasConflict) {
              // reject color
              //std::cout << vD << " cTmp:" << cTmp << " - rejected"<< std::endl;
            } else {
              // commit color, update things
              //std::cout << vD << " cTmp:" << cTmp << " - commit"<< std::endl;
              commitColoring[vD] = cTmp;
            }
          }
        }
        // commit color and update internalDegree and externalDegree and palletes
        #pragma omp for schedule(dynamic,8)
        for(NodeId vD = 0; vD < nDense; vD++) {
          const NodeId v = denseToGraph[vD];
          const ColorID col = commitColoring[vD];
          const NodeId vComp = denseComponent[vD];
          if(col!=0) {
            // if color commited
            coloring[v] = col;
          } else {
            // update things:
            // still uncolored: shrink degrees/palette for each newly
            // committed dense neighbor
            for (NodeId u : g.out_neigh(v)) {
              const NodeId uD = graphToDense[u];
              if(uD != invalidNode) {// u is dense
                const ColorID uColCommit = commitColoring[uD];
                if(uColCommit != 0) {
                  const NodeId uComp = denseComponent[uD];
                  if(vComp == uComp) {
                    internalDegree[vD]--;
                    //std::cout << "iD-- " << vD << " " << uD << std::endl;
                  } else {
                    externalDegree[vD]--;
                    //std::cout << "eD-- " << vD << " " << uD << std::endl;
                  }
                  auto palette_v_end = std::remove(palettes[vD].begin(), palettes[vD].end(), uColCommit);
                  palettes[vD].resize(std::distance(palettes[vD].begin(), palette_v_end));
                }
              }
            }
          }
        }
        // removal of component members:
        #pragma omp for schedule(dynamic,8)
        for(NodeId comp = 0; comp < nComponent; comp++) {
          const NodeId compSize = componentCount[comp];
          if(compSize == 0) {continue;}
          assert(compSize>0);
          const NodeId compOffsetBegin = componentMembersOffset[comp];
          const NodeId compOffsetEnd = compOffsetBegin + compSize;
          NodeId writeIndex = 0;
          for(NodeId o = compOffsetBegin; o<compOffsetEnd; o++) {
            const NodeId vD = componentMembers[o];
            const ColorID vCol = commitColoring[vD];
            if(vCol == 0) { // keep (else overwrite)
              componentMembers[compOffsetBegin + writeIndex++] = vD;
            }
          }
          componentCount[comp] = writeIndex; // number of kept members
        }
        if(false){ // validate updates, can remove later
          #pragma omp for
          for(NodeId comp = 0; comp < nComponent; comp++) {
            const NodeId compSize = componentCount[comp];
            if(compSize == 0) {continue;}
            assert(compSize>0);
            const NodeId compOffsetBegin = componentMembersOffset[comp];
            const NodeId compOffsetEnd = compOffsetBegin + compSize;
            for(NodeId o = compOffsetBegin; o<compOffsetEnd; o++) {
              const NodeId vD = componentMembers[o];
              NodeId intDeg = 0;
              NodeId extDeg = 0;
              const NodeId v = denseToGraph[vD];
              for (NodeId u : g.out_neigh(v)) {
                if(coloring[u] == 0) {
                  const NodeId uD = graphToDense[u];
                  if(uD != invalidNode) {
                    const NodeId uComp = denseComponent[uD];
                    if(uComp == comp) {
                      intDeg++; // dense of same comp
                      //std::cout << "valid iD " << vD << " " << uD << std::endl;
                    } else {
                      extDeg++; // dense of other comp
                      //std::cout << "valid eD " << vD << " " << uD << std::endl;
                    }
                  } else {
                    extDeg++; // sparse
                    //std::cout << "valid eD " << vD << " " << uD << std::endl;
                  }
                }
              }
              //std::cout << "validate: " << vD << " " << intDeg << " " << internalDegree[vD]
              //  << " " << extDeg << " " << externalDegree[vD] << std::endl;
              // if(intDeg != internalDegree[vD] || extDeg != externalDegree[vD]) {
              //   std::cout << "validate: " << vD << " " << v
              //     << " " << intDeg << " " << internalDegree[vD]
              //     << " " << extDeg << " " << externalDegree[vD]
              //     << " comp " << comp << std::endl;
              // }
              assert(intDeg == internalDegree[vD]);
              assert(extDeg == externalDegree[vD]);
            }
          }
        }
      }// pragma omp parallel
      std::string phaseName("coloring_step_");
      phaseName += std::to_string(i);
      detailTimer.endPhase(phaseName.c_str());
    }
  }

  // ~Coloring_Dense_Sparse() {
  //   // detailTimer.endPhase("rest");
  //   detailTimer.print();
  // }

  // NOTE(review): returns n (vertex count), not the number of distinct
  // colors used — presumably an upper bound; confirm against callers.
  int n_colors() {return n;}

  // void
print_nColored() { // // debug counting below: // uint64_t nColored = 0; // #pragma omp parallel for reduction(+:nColored) // for(uint64_t i =0; i<n; i++) { // if(coloring[i]>0){nColored++;} // assert(coloring[i] <= delta+1); // } // // std::cout << "nColored: " << nColored << std::endl; // } // void elkin() { // // 4. Run algo for sparse Vertices [12: Elkin] // //all_nodes - graphToDense, sparse_value = cds.invalidNode // print_nColored(); // debug // std::cout << "Elkin:" << std::endl; // coloring_elkin_subalgo_interface(g, coloring, graphToDense, invalidNode, algoParams); // print_nColored(); // debug // detailTimer.endPhase("elkin"); // } void barenboim() { // 5. Run algo for residual graph [9: Barenboim] std::cout << "Barenboim:" << std::endl; coloring_barenboim_subalgo_interface(g, coloring); // print_nColored(); // debug // detailTimer.endPhase("barenboim"); } }; template <class CGraph> int graph_coloring_dense_sparse(CGraph& g, std::vector<ColorID> &coloring) { // A fundamental assumption of the algo is that the leader of a dense component // can color the members in sequence // this seems to assume the dense coloring steps of a single compnent might not be parallelizable // So we either need many dense components or need them to be very small // should they be large and few, then either the graph is not very densely connected // or the whole graph is densely connected. But then it is hard to have a faster than linear algo anyway. Coloring_Dense_Sparse cds(g,coloring); cds.decomposition(); cds.initial_coloring(); cds.coloring_steps(); // relevant for next steps: // coloring - 0 if not yet colored // cds.graphToDense - if cds.invalidNode, then sparse // possibly: also send palettes for sparse vertices, do have them cds.barenboim(); return cds.n_colors(); } } // namespace GMS::Coloring #endif // COLORING_DENSE_SPARSE_H_
GB_cumsum.c
//------------------------------------------------------------------------------
// GB_cumsum: cumulative sum of an array
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Compute the cumulative sum of an array count[0:n], of size n+1
// in pseudo-MATLAB notation:

//      k = sum (count [0:n-1] != 0) ;

//      count = cumsum ([0 count[0:n-1]]) ;

// That is, count [j] on input is overwritten with the value of
// sum (count [0..j-1]).  count [n] is implicitly zero on input.
// On output, count [n] is the total sum.

#include "GB.h"

void GB_cumsum                      // cumulative sum of an array
(
    int64_t *GB_RESTRICT count,     // size n+1, input/output
    const int64_t n,
    int64_t *GB_RESTRICT kresult,   // return k, if needed by the caller
    int nthreads
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (count != NULL) ;
    ASSERT (n >= 0) ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    #if !defined ( _OPENMP )
    nthreads = 1 ;
    #endif

    // cap the thread count so each thread gets at least ~1024 entries;
    // below that the parallel overhead dominates
    if (nthreads > 1)
    {
        nthreads = GB_IMIN (nthreads, n / 1024) ;
        nthreads = GB_IMAX (nthreads, 1) ;
    }

    //--------------------------------------------------------------------------
    // count = cumsum ([0 count[0:n-1]]) ;
    //--------------------------------------------------------------------------

    if (kresult == NULL)
    {

        if (nthreads <= 2)
        {

            //------------------------------------------------------------------
            // sequential cumsum (also used for nthreads == 2, where the
            // two-pass parallel scheme would not pay off)
            //------------------------------------------------------------------

            int64_t s = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                int64_t c = count [i] ;
                count [i] = s ;
                s += c ;
            }
            count [n] = s ;

        }
        else
        {

            //------------------------------------------------------------------
            // cumsum with multiple threads: two passes.  Pass 1: each thread
            // sums its partition into ws[tid].  Pass 2 (after the barrier):
            // each thread re-scans its partition, offset by the sum of all
            // lower-ranked partitions.
            //------------------------------------------------------------------

            // allocate workspace: one partial sum per thread
            int64_t *ws = NULL ;
            GB_MALLOC_MEMORY (ws, nthreads, sizeof (int64_t)) ;
            if (ws == NULL)
            {
                // out of memory; use a single thread instead
                GB_cumsum (count, n, NULL, 1) ;
                return ;
            }

            // NOTE(review): this assumes the OpenMP runtime grants exactly
            // nthreads threads; GB_PARTITION ranges are only all covered if
            // the full team is created — confirm against GB's OpenMP policy.
            #pragma omp parallel num_threads(nthreads)
            {
                // each thread sums up its own part
                int tid = GB_OPENMP_THREAD_ID ;
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t s = 0 ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    s += count [i] ;
                }
                ws [tid] = s ;

                // all partial sums must be published before pass 2 reads them
                #pragma omp barrier

                // each thread computes the cumsum of its own part, starting
                // from the total of all preceding partitions
                s = 0 ;
                for (int i = 0 ; i < tid ; i++)
                {
                    s += ws [i] ;
                }
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    count [i] = s ;
                    s += c ;
                }
                // the owner of the last partition writes the grand total
                if (iend == n)
                {
                    count [n] = s ;
                }
            }

            // free workspace
            GB_FREE_MEMORY (ws, nthreads, sizeof (int64_t)) ;
        }

    }
    else
    {

        if (nthreads <= 2)
        {

            //------------------------------------------------------------------
            // sequential cumsum, also counting k = # of nonzero entries
            //------------------------------------------------------------------

            int64_t k = 0 ;
            int64_t s = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                int64_t c = count [i] ;
                if (c != 0) k++ ;
                count [i] = s ;
                s += c ;
            }
            count [n] = s ;
            (*kresult) = k ;

        }
        else
        {

            //------------------------------------------------------------------
            // parallel cumsum, also computing k: same two-pass scheme, with a
            // second workspace array wk[] holding each thread's nonzero count
            //------------------------------------------------------------------

            int64_t *ws = NULL ;
            GB_MALLOC_MEMORY (ws, 2*nthreads, sizeof (int64_t)) ;
            if (ws == NULL)
            {
                // out of memory; use a single thread instead
                GB_cumsum (count, n, kresult, 1) ;
                return ;
            }
            int64_t *wk = ws + nthreads ;

            #pragma omp parallel num_threads(nthreads)
            {
                // each thread sums up its own part and counts its nonzeros
                int tid = GB_OPENMP_THREAD_ID ;
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t k = 0 ;
                int64_t s = 0 ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    if (c != 0) k++ ;
                    s += c ;
                }
                ws [tid] = s ;
                wk [tid] = k ;

                #pragma omp barrier

                // each thread computes the cumsum of its own part
                s = 0 ;
                for (int i = 0 ; i < tid ; i++)
                {
                    s += ws [i] ;
                }
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    count [i] = s ;
                    s += c ;
                }
                if (iend == n)
                {
                    count [n] = s ;
                }
            }

            // reduce the per-thread nonzero counts (serial: nthreads is small)
            int64_t k = 0 ;
            for (int tid = 0 ; tid < nthreads ; tid++)
            {
                k += wk [tid] ;
            }
            (*kresult) = k ;

            // free workspace
            GB_FREE_MEMORY (ws, 2*nthreads, sizeof (int64_t)) ;
        }
    }
}
GB_unop__minv_bool_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__minv_bool_bool)
// op(A') function:  GB (_unop_tran__minv_bool_bool)

// C type:   bool
// A type:   bool
// cast:     ;
// unaryop:  cij = true

// MINV on bool is the constant true (the multiplicative inverse of any
// boolean), so the generated cast/getter macros below expand to no-ops
// (bare ';') and only the final assignment remains.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    bool

// aij = Ax [pA]  (no-op: the operator never reads the input value)
#define GB_GETA(aij,Ax,pA) \
    ;

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = true ;

// casting (no-op for bool -> bool)
#define GB_CAST(z, aij) \
    ; ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)   \
{                           \
    /* aij = Ax [pA] */     \
    ; ;                     \
    /* Cx [pC] = op (cast (aij)) */ \
    ; ;                     \
    Cx [pC] = true ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__minv_bool_bool)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry of Cx is written
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            ; ; ; ;
            Cx [p] = true ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b;
        // only entries present in the bitmap are written
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            ; ; ; ;
            Cx [p] = true ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by the textually-included template
// GB_unop_transpose.c, which expands the GB_* macros defined above.

GrB_Info GB (_unop_tran__minv_bool_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer-valued ceiling/floor division helpers used by the generated
 * tiled loop bounds below (PLUTO/CLooG convention). */
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * Note: Y is modified in place to perform the carry (glibc-manual idiom).
 */
int
timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: time-tiled (diamond/parallelogram) version of the 3D
 * 7-point Jacobi stencil; the loop nest was generated by PLUTO/CLooG.
 * Expected usage: <prog> Nx Ny Nz Nt. */
int main(int argc, char *argv[]) {
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx..Nt remain uninitialized if fewer than four
   * command-line arguments are given — UB; callers must pass all four. */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;   /* +2 for one halo layer on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time buffers, ping-ponged via (t5%2)/((t5+1)%2) in the kernel.
   * NOTE(review): allocations are unchecked (benchmark code). */
  double ****A = (double ****) malloc(sizeof(double***)*2);

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  // (consumed by the source-to-source transformation tool, -1 terminates)
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 4;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  //
  /* NOTE(review): these loops start at index 1 and A[1] is never
   * initialized, yet the stencil reads boundary plane 0 of both time
   * buffers — uninitialized reads. Kept verbatim here (upstream benchmark
   * behavior); see the non-tiled 3d7pt.c for the suggested fix. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

/* The comment block below is a remnant of glibc's stdc-predef.h that the
 * source-to-source tool inlined during code generation; retained verbatim. */
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
/* We do not support C11 <threads.h>.  */

  /* t1..t8: tile and point iterators of the generated schedule;
   * lbv/ubv: vectorized innermost loop bounds.
   * CAUTION: auto-generated — the bound expressions encode the legality of
   * the time tiling; do not hand-edit. */
  int t1, t2, t3, t4, t5, t6, t7, t8;
  int lb, ub, lbp, ubp, lb2, ub2;
  register int lbv, ubv;

/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
    /* wavefront-parallel loop over tiles: tiles along t2 are independent */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) {
        for (t4=max(max(max(0,ceild(t1-15,16)),ceild(8*t2-Nz-60,64)),ceild(4*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t3+Nx,64),floord(Nt+Nx-4,64)),floord(4*t1+Nx+5,64)),floord(8*t2+Nx+4,64)),floord(8*t1-8*t2+Nz+Nx+3,64));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),64*t4+62),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                lbv=max(64*t4,t5+1);
                ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                /* innermost (vectorized) loop: skewed 7-point update;
                 * (-t5+t6, -t5+t7, -t5+t8) recovers original (i,j,k) */
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
morn_image_convert.c
/* Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com> Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "morn_image.h" short v_to_r[256] = {-180 ,-178 ,-177 ,-175 ,-174 ,-173 ,-171 ,-170 ,-168 ,-167 ,-166 ,-164 ,-163 ,-161 ,-160 ,-159 ,-157 ,-156 ,-154 ,-153 ,-152 ,-150 ,-149 ,-147 ,-146 ,-145 ,-143 ,-142 ,-140 ,-139 ,-137 ,-136 ,-135 ,-133 ,-132 ,-130 ,-129 ,-128 ,-126 ,-125 ,-123 ,-122 ,-121 ,-119 ,-118 ,-116 ,-115 ,-114 ,-112 ,-111 ,-109 ,-108 ,-107 ,-105 ,-104 ,-102 ,-101 ,-100 ,-98 ,-97 ,-95 ,-94 ,-93 ,-91 ,-90 ,-88 ,-87 ,-86 ,-84 ,-83 ,-81 ,-80 ,-79 ,-77 ,-76 ,-74 ,-73 ,-72 ,-70 ,-69 ,-67 ,-66 ,-65 ,-63 ,-62 ,-60 ,-59 ,-58 ,-56 ,-55 ,-53 ,-52 ,-51 ,-49 ,-48 ,-46 ,-45 ,-43 ,-42 ,-41 ,-39 ,-38 ,-36 ,-35 ,-34 ,-32 ,-31 ,-29 ,-28 ,-27 ,-25 ,-24 ,-22 ,-21 ,-20 ,-18 ,-17 ,-15 ,-14 ,-13 ,-11 ,-10 ,-8 ,-7 ,-6 ,-4 ,-3 ,-1 ,0 ,1 ,3 ,4 ,6 ,7 ,8 ,10 ,11 ,13 ,14 ,15 ,17 ,18 ,20 ,21 ,22 ,24 ,25 ,27 ,28 ,29 ,31 ,32 ,34 ,35 ,36 ,38 ,39 ,41 ,42 ,43 ,45 ,46 ,48 ,49 ,51 ,52 ,53 ,55 ,56 ,58 ,59 ,60 ,62 ,63 ,65 ,66 ,67 ,69 ,70 ,72 ,73 ,74 ,76 ,77 ,79 ,80 ,81 ,83 ,84 ,86 ,87 ,88 ,90 ,91 ,93 ,94 ,95 ,97 ,98 ,100 ,101 ,102 ,104 ,105 ,107 ,108 ,109 ,111 ,112 ,114 ,115 ,116 ,118 ,119 ,121 ,122 ,123 ,125 ,126 ,128 ,129 ,130 ,132 ,133 ,135 ,136 ,137 ,139 ,140 ,142 ,143 ,145 ,146 ,147 ,149 ,150 ,152 ,153 ,154 ,156 ,157 ,159 ,160 ,161 ,163 ,164 ,166 ,167 ,168 ,170 ,171 ,173 ,174 ,175 ,177 ,178}; short u_to_g[256] = {-44 ,-44 ,-43 ,-43 ,-43 ,-42 ,-42 ,-42 ,-41 
,-41 ,-40 ,-40 ,-40 ,-39 ,-39 ,-39 ,-38 ,-38 ,-38 ,-37 ,-37 ,-37 ,-36 ,-36 ,-36 ,-35 ,-35 ,-35 ,-34 ,-34 ,-34 ,-33 ,-33 ,-33 ,-32 ,-32 ,-32 ,-31 ,-31 ,-31 ,-30 ,-30 ,-29 ,-29 ,-29 ,-28 ,-28 ,-28 ,-27 ,-27 ,-27 ,-26 ,-26 ,-26 ,-25 ,-25 ,-25 ,-24 ,-24 ,-24 ,-23 ,-23 ,-23 ,-22 ,-22 ,-22 ,-21 ,-21 ,-21 ,-20 ,-20 ,-20 ,-19 ,-19 ,-19 ,-18 ,-18 ,-17 ,-17 ,-17 ,-16 ,-16 ,-16 ,-15 ,-15 ,-15 ,-14 ,-14 ,-14 ,-13 ,-13 ,-13 ,-12 ,-12 ,-12 ,-11 ,-11 ,-11 ,-10 ,-10 ,-10 ,-9 ,-9 ,-9 ,-8 ,-8 ,-8 ,-7 ,-7 ,-7 ,-6 ,-6 ,-5 ,-5 ,-5 ,-4 ,-4 ,-4 ,-3 ,-3 ,-3 ,-2 ,-2 ,-2 ,-1 ,-1 ,-1 ,0 ,0 ,0 ,1 ,1 ,1 ,2 ,2 ,2 ,3 ,3 ,3 ,4 ,4 ,4 ,5 ,5 ,5 ,6 ,6 ,7 ,7 ,7 ,8 ,8 ,8 ,9 ,9 ,9 ,10 ,10 ,10 ,11 ,11 ,11 ,12 ,12 ,12 ,13 ,13 ,13 ,14 ,14 ,14 ,15 ,15 ,15 ,16 ,16 ,16 ,17 ,17 ,17 ,18 ,18 ,19 ,19 ,19 ,20 ,20 ,20 ,21 ,21 ,21 ,22 ,22 ,22 ,23 ,23 ,23 ,24 ,24 ,24 ,25 ,25 ,25 ,26 ,26 ,26 ,27 ,27 ,27 ,28 ,28 ,28 ,29 ,29 ,29 ,30 ,30 ,31 ,31 ,31 ,32 ,32 ,32 ,33 ,33 ,33 ,34 ,34 ,34 ,35 ,35 ,35 ,36 ,36 ,36 ,37 ,37 ,37 ,38 ,38 ,38 ,39 ,39 ,39 ,40 ,40 ,40 ,41 ,41 ,42 ,42 ,42 ,43 ,43 ,43 ,44}; short v_to_g[256] = {-91 ,-91 ,-90 ,-89 ,-89 ,-88 ,-87 ,-86 ,-86 ,-85 ,-84 ,-84 ,-83 ,-82 ,-81 ,-81 ,-80 ,-79 ,-79 ,-78 ,-77 ,-76 ,-76 ,-75 ,-74 ,-74 ,-73 ,-72 ,-71 ,-71 ,-70 ,-69 ,-69 ,-68 ,-67 ,-66 ,-66 ,-65 ,-64 ,-64 ,-63 ,-62 ,-61 ,-61 ,-60 ,-59 ,-59 ,-58 ,-57 ,-56 ,-56 ,-55 ,-54 ,-54 ,-53 ,-52 ,-51 ,-51 ,-50 ,-49 ,-49 ,-48 ,-47 ,-46 ,-46 ,-45 ,-44 ,-44 ,-43 ,-42 ,-41 ,-41 ,-40 ,-39 ,-39 ,-38 ,-37 ,-36 ,-36 ,-35 ,-34 ,-34 ,-33 ,-32 ,-31 ,-31 ,-30 ,-29 ,-29 ,-28 ,-27 ,-26 ,-26 ,-25 ,-24 ,-24 ,-23 ,-22 ,-21 ,-21 ,-20 ,-19 ,-19 ,-18 ,-17 ,-16 ,-16 ,-15 ,-14 ,-14 ,-13 ,-12 ,-11 ,-11 ,-10 ,-9 ,-9 ,-8 ,-7 ,-6 ,-6 ,-5 ,-4 ,-4 ,-3 ,-2 ,-1 ,-1 ,0 ,1 ,1 ,2 ,3 ,4 ,4 ,5 ,6 ,6 ,7 ,8 ,9 ,9 ,10 ,11 ,11 ,12 ,13 ,14 ,14 ,15 ,16 ,16 ,17 ,18 ,19 ,19 ,20 ,21 ,21 ,22 ,23 ,24 ,24 ,25 ,26 ,26 ,27 ,28 ,29 ,29 ,30 ,31 ,31 ,32 ,33 ,34 ,34 ,35 ,36 ,36 ,37 ,38 ,39 ,39 ,40 ,41 ,41 ,42 ,43 ,44 ,44 ,45 ,46 ,46 ,47 ,48 ,49 ,49 ,50 ,51 ,51 ,52 ,53 ,54 ,54 ,55 
,56 ,56 ,57 ,58 ,59 ,59 ,60 ,61 ,61 ,62 ,63 ,64 ,64 ,65 ,66 ,66 ,67 ,68 ,69 ,69 ,70 ,71 ,71 ,72 ,73 ,74 ,74 ,75 ,76 ,76 ,77 ,78 ,79 ,79 ,80 ,81 ,81 ,82 ,83 ,84 ,84 ,85 ,86 ,86 ,87 ,88 ,89 ,89 ,90 ,91}; short u_to_b[256] = {-227 ,-225 ,-223 ,-221 ,-219 ,-218 ,-216 ,-214 ,-212 ,-211 ,-209 ,-207 ,-205 ,-204 ,-202 ,-200 ,-198 ,-196 ,-195 ,-193 ,-191 ,-189 ,-188 ,-186 ,-184 ,-182 ,-181 ,-179 ,-177 ,-175 ,-173 ,-172 ,-170 ,-168 ,-166 ,-165 ,-163 ,-161 ,-159 ,-158 ,-156 ,-154 ,-152 ,-150 ,-149 ,-147 ,-145 ,-143 ,-142 ,-140 ,-138 ,-136 ,-135 ,-133 ,-131 ,-129 ,-127 ,-126 ,-124 ,-122 ,-120 ,-119 ,-117 ,-115 ,-113 ,-112 ,-110 ,-108 ,-106 ,-104 ,-103 ,-101 ,-99 ,-97 ,-96 ,-94 ,-92 ,-90 ,-89 ,-87 ,-85 ,-83 ,-81 ,-80 ,-78 ,-76 ,-74 ,-73 ,-71 ,-69 ,-67 ,-65 ,-64 ,-62 ,-60 ,-58 ,-57 ,-55 ,-53 ,-51 ,-50 ,-48 ,-46 ,-44 ,-42 ,-41 ,-39 ,-37 ,-35 ,-34 ,-32 ,-30 ,-28 ,-27 ,-25 ,-23 ,-21 ,-19 ,-18 ,-16 ,-14 ,-12 ,-11 ,-9 ,-7 ,-5 ,-4 ,-2 ,0 ,2 ,4 ,5 ,7 ,9 ,11 ,12 ,14 ,16 ,18 ,19 ,21 ,23 ,25 ,27 ,28 ,30 ,32 ,34 ,35 ,37 ,39 ,41 ,42 ,44 ,46 ,48 ,50 ,51 ,53 ,55 ,57 ,58 ,60 ,62 ,64 ,65 ,67 ,69 ,71 ,73 ,74 ,76 ,78 ,80 ,81 ,83 ,85 ,87 ,89 ,90 ,92 ,94 ,96 ,97 ,99 ,101 ,103 ,104 ,106 ,108 ,110 ,112 ,113 ,115 ,117 ,119 ,120 ,122 ,124 ,126 ,127 ,129 ,131 ,133 ,135 ,136 ,138 ,140 ,142 ,143 ,145 ,147 ,149 ,150 ,152 ,154 ,156 ,158 ,159 ,161 ,163 ,165 ,166 ,168 ,170 ,172 ,173 ,175 ,177 ,179 ,181 ,182 ,184 ,186 ,188 ,189 ,191 ,193 ,195 ,196 ,198 ,200 ,202 ,204 ,205 ,207 ,209 ,211 ,212 ,214 ,216 ,218 ,219 ,221 ,223 ,225}; unsigned char r_to_y[256] = {0 ,0 ,1 ,1 ,1 ,1 ,2 ,2 ,2 ,3 ,3 ,3 ,4 ,4 ,4 ,4 ,5 ,5 ,5 ,6 ,6 ,6 ,7 ,7 ,7 ,7 ,8 ,8 ,8 ,9 ,9 ,9 ,10 ,10 ,10 ,10 ,11 ,11 ,11 ,12 ,12 ,12 ,13 ,13 ,13 ,13 ,14 ,14 ,14 ,15 ,15 ,15 ,16 ,16 ,16 ,16 ,17 ,17 ,17 ,18 ,18 ,18 ,19 ,19 ,19 ,19 ,20 ,20 ,20 ,21 ,21 ,21 ,22 ,22 ,22 ,22 ,23 ,23 ,23 ,24 ,24 ,24 ,25 ,25 ,25 ,25 ,26 ,26 ,26 ,27 ,27 ,27 ,28 ,28 ,28 ,28 ,29 ,29 ,29 ,30 ,30 ,30 ,30 ,31 ,31 ,31 ,32 ,32 ,32 ,33 ,33 ,33 ,33 ,34 ,34 ,34 ,35 ,35 ,35 ,36 ,36 ,36 ,36 ,37 
,37 ,37 ,38 ,38 ,38 ,39 ,39 ,39 ,39 ,40 ,40 ,40 ,41 ,41 ,41 ,42 ,42 ,42 ,42 ,43 ,43 ,43 ,44 ,44 ,44 ,45 ,45 ,45 ,45 ,46 ,46 ,46 ,47 ,47 ,47 ,48 ,48 ,48 ,48 ,49 ,49 ,49 ,50 ,50 ,50 ,51 ,51 ,51 ,51 ,52 ,52 ,52 ,53 ,53 ,53 ,54 ,54 ,54 ,54 ,55 ,55 ,55 ,56 ,56 ,56 ,57 ,57 ,57 ,57 ,58 ,58 ,58 ,59 ,59 ,59 ,60 ,60 ,60 ,60 ,61 ,61 ,61 ,62 ,62 ,62 ,62 ,63 ,63 ,63 ,64 ,64 ,64 ,65 ,65 ,65 ,65 ,66 ,66 ,66 ,67 ,67 ,67 ,68 ,68 ,68 ,68 ,69 ,69 ,69 ,70 ,70 ,70 ,71 ,71 ,71 ,71 ,72 ,72 ,72 ,73 ,73 ,73 ,74 ,74 ,74 ,74 ,75 ,75 ,75 ,76 ,76 ,76}; unsigned char g_to_y[256] = {0 ,1 ,1 ,2 ,2 ,3 ,4 ,4 ,5 ,5 ,6 ,6 ,7 ,8 ,8 ,9 ,9 ,10 ,11 ,11 ,12 ,12 ,13 ,14 ,14 ,15 ,15 ,16 ,16 ,17 ,18 ,18 ,19 ,19 ,20 ,21 ,21 ,22 ,22 ,23 ,23 ,24 ,25 ,25 ,26 ,26 ,27 ,28 ,28 ,29 ,29 ,30 ,31 ,31 ,32 ,32 ,33 ,33 ,34 ,35 ,35 ,36 ,36 ,37 ,38 ,38 ,39 ,39 ,40 ,41 ,41 ,42 ,42 ,43 ,43 ,44 ,45 ,45 ,46 ,46 ,47 ,48 ,48 ,49 ,49 ,50 ,50 ,51 ,52 ,52 ,53 ,53 ,54 ,55 ,55 ,56 ,56 ,57 ,58 ,58 ,59 ,59 ,60 ,60 ,61 ,62 ,62 ,63 ,63 ,64 ,65 ,65 ,66 ,66 ,67 ,68 ,68 ,69 ,69 ,70 ,70 ,71 ,72 ,72 ,73 ,73 ,74 ,75 ,75 ,76 ,76 ,77 ,77 ,78 ,79 ,79 ,80 ,80 ,81 ,82 ,82 ,83 ,83 ,84 ,85 ,85 ,86 ,86 ,87 ,87 ,88 ,89 ,89 ,90 ,90 ,91 ,92 ,92 ,93 ,93 ,94 ,95 ,95 ,96 ,96 ,97 ,97 ,98 ,99 ,99 ,100 ,100 ,101 ,102 ,102 ,103 ,103 ,104 ,104 ,105 ,106 ,106 ,107 ,107 ,108 ,109 ,109 ,110 ,110 ,111 ,112 ,112 ,113 ,113 ,114 ,114 ,115 ,116 ,116 ,117 ,117 ,118 ,119 ,119 ,120 ,120 ,121 ,122 ,122 ,123 ,123 ,124 ,124 ,125 ,126 ,126 ,127 ,127 ,128 ,129 ,129 ,130 ,130 ,131 ,131 ,132 ,133 ,133 ,134 ,134 ,135 ,136 ,136 ,137 ,137 ,138 ,139 ,139 ,140 ,140 ,141 ,141 ,142 ,143 ,143 ,144 ,144 ,145 ,146 ,146 ,147 ,147 ,148 ,149 ,149 ,150}; unsigned char b_to_y[256] = {0 ,0 ,0 ,0 ,0 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,10 ,10 ,10 ,10 ,10 ,10 ,10 ,10 ,10 ,11 ,11 ,11 ,11 ,11 ,11 ,11 
,11 ,12 ,12 ,12 ,12 ,12 ,12 ,12 ,12 ,12 ,13 ,13 ,13 ,13 ,13 ,13 ,13 ,13 ,13 ,14 ,14 ,14 ,14 ,14 ,14 ,14 ,14 ,14 ,15 ,15 ,15 ,15 ,15 ,15 ,15 ,15 ,16 ,16 ,16 ,16 ,16 ,16 ,16 ,16 ,16 ,17 ,17 ,17 ,17 ,17 ,17 ,17 ,17 ,17 ,18 ,18 ,18 ,18 ,18 ,18 ,18 ,18 ,18 ,19 ,19 ,19 ,19 ,19 ,19 ,19 ,19 ,19 ,20 ,20 ,20 ,20 ,20 ,20 ,20 ,20 ,21 ,21 ,21 ,21 ,21 ,21 ,21 ,21 ,21 ,22 ,22 ,22 ,22 ,22 ,22 ,22 ,22 ,22 ,23 ,23 ,23 ,23 ,23 ,23 ,23 ,23 ,23 ,24 ,24 ,24 ,24 ,24 ,24 ,24 ,24 ,25 ,25 ,25 ,25 ,25 ,25 ,25 ,25 ,25 ,26 ,26 ,26 ,26 ,26 ,26 ,26 ,26 ,26 ,27 ,27 ,27 ,27 ,27 ,27 ,27 ,27 ,27 ,28 ,28 ,28 ,28 ,28 ,28 ,28 ,28 ,29 ,29 ,29 ,29 ,29 ,29}; unsigned char r_to_v[512] = {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,2 ,3 ,3 ,4 ,5 ,5 ,6 ,7 ,8 ,8 ,9 ,10 ,10 ,11 ,12 ,12 ,13 ,14 ,15 ,15 ,16 ,17 ,17 ,18 ,19 ,20 ,20 ,21 ,22 ,22 ,23 ,24 ,25 ,25 ,26 ,27 ,27 ,28 ,29 ,30 ,30 ,31 ,32 ,32 ,33 ,34 ,35 ,35 ,36 ,37 ,37 ,38 ,39 ,40 ,40 ,41 ,42 ,42 ,43 ,44 ,45 ,45 ,46 ,47 ,47 ,48 ,49 ,50 ,50 ,51 ,52 ,52 ,53 ,54 ,55 ,55 ,56 ,57 ,57 ,58 ,59 ,60 ,60 ,61 ,62 ,62 ,63 ,64 ,65 ,65 ,66 ,67 ,67 ,68 ,69 ,70 ,70 ,71 ,72 ,72 ,73 ,74 ,75 ,75 ,76 ,77 ,77 ,78 ,79 ,80 ,80 ,81 ,82 ,82 ,83 ,84 ,85 ,85 ,86 ,87 ,87 ,88 ,89 ,89 ,90 ,91 ,92 ,92 ,93 ,94 ,94 ,95 ,96 ,97 ,97 ,98 ,99 ,99 ,100 ,101 ,102 ,102 ,103 ,104 ,104 ,105 ,106 ,107 ,107 ,108 ,109 ,109 ,110 ,111 ,112 ,112 ,113 ,114 ,114 ,115 ,116 ,117 ,117 ,118 ,119 ,119 ,120 ,121 ,122 ,122 ,123 ,124 ,124 ,125 ,126 ,127 ,127, 128 ,129 ,129 ,130 ,131 ,132 ,132 ,133 ,134 ,134 ,135 ,136 ,137 ,137 ,138 ,139 ,139 ,140 ,141 ,142 ,142 ,143 ,144 ,144 ,145 ,146 ,147 ,147 ,148 ,149 ,149 ,150 ,151 ,152 ,152 ,153 ,154 ,154 ,155 ,156 ,157 ,157 ,158 ,159 ,159 ,160 ,161 ,162 ,162 ,163 ,164 ,164 ,165 ,166 ,167 ,167 ,168 ,169 ,169 ,170 ,171 ,171 ,172 ,173 ,174 ,174 ,175 ,176 ,176 ,177 ,178 ,179 ,179 ,180 
,181 ,181 ,182 ,183 ,184 ,184 ,185 ,186 ,186 ,187 ,188 ,189 ,189 ,190 ,191 ,191 ,192 ,193 ,194 ,194 ,195 ,196 ,196 ,197 ,198 ,199 ,199 ,200 ,201 ,201 ,202 ,203 ,204 ,204 ,205 ,206 ,206 ,207 ,208 ,209 ,209 ,210 ,211 ,211 ,212 ,213 ,214 ,214 ,215 ,216 ,216 ,217 ,218 ,219 ,219 ,220 ,221 ,221 ,222 ,223 ,224 ,224 ,225 ,226 ,226 ,227 ,228 ,229 ,229 ,230 ,231 ,231 ,232 ,233 ,234 ,234 ,235 ,236 ,236 ,237 ,238 ,239 ,239 ,240 ,241 ,241 ,242 ,243 ,244 ,244 ,245 ,246 ,246 ,247 ,248 ,248 ,249 ,250 ,251 ,251 ,252 ,253 ,253 ,254 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255}; unsigned char b_to_u[512] = {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,1 ,2 ,2 ,3 ,3 ,4 ,4 ,5 ,6 ,6 ,7 ,7 ,8 ,8 ,9 ,10 ,10 ,11 ,11 ,12 ,12 ,13 ,14 ,14 ,15 ,15 ,16 ,16 ,17 ,17 ,18 ,19 ,19 ,20 ,20 ,21 ,21 ,22 ,23 ,23 ,24 ,24 ,25 ,25 ,26 ,26 ,27 ,28 ,28 ,29 ,29 ,30 ,30 ,31 ,32 ,32 ,33 ,33 ,34 ,34 ,35 ,36 ,36 ,37 ,37 ,38 ,38 ,39 ,39 ,40 ,41 ,41 ,42 ,42 ,43 ,43 ,44 ,45 ,45 ,46 ,46 ,47 ,47 ,48 ,48 ,49 ,50 ,50 ,51 ,51 ,52 ,52 ,53 ,54 ,54 ,55 ,55 ,56 ,56 ,57 ,58 ,58 ,59 ,59 ,60 ,60 ,61 ,61 ,62 ,63 ,63 ,64 ,64 ,65 ,65 ,66 ,67 ,67 ,68 ,68 ,69 ,69 ,70 ,70 ,71 ,72 ,72 ,73 ,73 ,74 ,74 ,75 ,76 ,76 ,77 ,77 ,78 ,78 ,79 ,79 ,80 ,81 ,81 ,82 ,82 ,83 ,83 ,84 ,85 ,85 ,86 ,86 ,87 ,87 ,88 ,89 ,89 ,90 ,90 ,91 ,91 ,92 ,92 ,93 ,94 ,94 ,95 ,95 ,96 ,96 ,97 ,98 ,98 ,99 ,99 ,100 ,100 ,101 ,101 ,102 ,103 ,103 ,104 ,104 ,105 ,105 ,106 ,107 ,107 ,108 ,108 ,109 ,109 ,110 ,111 ,111 ,112 ,112 ,113 ,113 ,114 ,114 ,115 ,116 ,116 ,117 ,117 ,118 ,118 ,119 ,120 ,120 ,121 ,121 ,122 ,122 ,123 ,123 ,124 ,125 ,125 ,126 ,126 ,127 ,127 , 128 ,129 ,129 ,130 ,130 ,131 
,131 ,132 ,133 ,133 ,134 ,134 ,135 ,135 ,136 ,136 ,137 ,138 ,138 ,139 ,139 ,140 ,140 ,141 ,142 ,142 ,143 ,143 ,144 ,144 ,145 ,145 ,146 ,147 ,147 ,148 ,148 ,149 ,149 ,150 ,151 ,151 ,152 ,152 ,153 ,153 ,154 ,155 ,155 ,156 ,156 ,157 ,157 ,158 ,158 ,159 ,160 ,160 ,161 ,161 ,162 ,162 ,163 ,164 ,164 ,165 ,165 ,166 ,166 ,167 ,167 ,168 ,169 ,169 ,170 ,170 ,171 ,171 ,172 ,173 ,173 ,174 ,174 ,175 ,175 ,176 ,177 ,177 ,178 ,178 ,179 ,179 ,180 ,180 ,181 ,182 ,182 ,183 ,183 ,184 ,184 ,185 ,186 ,186 ,187 ,187 ,188 ,188 ,189 ,189 ,190 ,191 ,191 ,192 ,192 ,193 ,193 ,194 ,195 ,195 ,196 ,196 ,197 ,197 ,198 ,199 ,199 ,200 ,200 ,201 ,201 ,202 ,202 ,203 ,204 ,204 ,205 ,205 ,206 ,206 ,207 ,208 ,208 ,209 ,209 ,210 ,210 ,211 ,211 ,212 ,213 ,213 ,214 ,214 ,215 ,215 ,216 ,217 ,217 ,218 ,218 ,219 ,219 ,220 ,220 ,221 ,222 ,222 ,223 ,223 ,224 ,224 ,225 ,226 ,226 ,227 ,227 ,228 ,228 ,229 ,230 ,230 ,231 ,231 ,232 ,232 ,233 ,233 ,234 ,235 ,235 ,236 ,236 ,237 ,237 ,238 ,239 ,239 ,240 ,240 ,241 ,241 ,242 ,242 ,243 ,244 ,244 ,245 ,245 ,246 ,246 ,247 ,248 ,248 ,249 ,249 ,250 ,250 ,251 ,252 ,252 ,253 ,253 ,254 ,254 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255}; void m_ImageYUVToRGB(MImage *src,MImage *dst) { mException(INVALID_IMAGE(src),EXIT,"invalid input"); mException((src->channel!=3),EXIT,"invalid input"); if(dst==NULL) dst = src; if(dst!=src) mImageRedefine(dst,MAX(3,dst->channel),src->height,src->width,dst->data); if(!INVALID_POINTER(src->border)) dst->border = src->border; int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) { for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) { unsigned char y = src->data[0][j][i]; unsigned char u = src->data[1][j][i]; unsigned char v = src->data[2][j][i]; short r = y + v_to_r[v]; short g = y - u_to_g[u] - v_to_g[v]; short b = y + u_to_b[u]; if(r<0) dst->data[2][j][i]=0; else if(r>255) dst->data[2][j][i]=255; else dst->data[2][j][i] = r; 
/* continuation of m_ImageYUVToRGB() (signature on the previous line):
   clamp the computed G and B terms into [0,255] and store them.
   Channel layout throughout this file is data[0]=B, data[1]=G, data[2]=R. */
if(g<0) dst->data[1][j][i]=0; else if(g>255) dst->data[1][j][i]=255; else dst->data[1][j][i] = g;
if(b<0) dst->data[0][j][i]=0; else if(b>255) dst->data[0][j][i]=255; else dst->data[0][j][i] = b;
}
}
/* tag the result: RGB when exactly 3 channels, otherwise RGBA */
*ImageType(dst)=(dst->channel==3)?MORN_IMAGE_RGB:MORN_IMAGE_RGBA;
}

/* m_ImageYUV422ToRGB: expand a packed single-channel YUV 4:2:2 image into RGB.
 *   src - packed source; bytes are read in u,y,v,y order (UYVY-like layout --
 *         TODO confirm against the capture format callers actually use)
 *   dst - destination; redefined to src->width/2 pixels wide (4 bytes -> 2 pixels)
 * Each (u,v) chroma pair is shared by two consecutive output pixels; per-channel
 * results are clamped to [0,255]. Rows are processed in parallel with OpenMP. */
void m_ImageYUV422ToRGB(MImage *src,MImage *dst)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    mException((src->channel!=1),EXIT,"invalid input");
    mImageRedefine(dst,MAX(3,dst->channel),src->height,src->width/2,dst->data);
    unsigned char **sdata=src->data[0];
    int j;
    #pragma omp parallel for
    for(j=0;j<dst->height;j++)
    {
        unsigned char y,u,v;
        short r,g,b;
        u = sdata[j][0];  /* first chroma byte of the row */
        for(int i=0;i<dst->width;i+=2)
        {
            /* first pixel of the pair */
            y=sdata[j][i+i+1];
            v=sdata[j][i+i+2];
            r = y + v_to_r[v];g = y - u_to_g[u] - v_to_g[v];b = y + u_to_b[u];
            if(r<0) dst->data[2][j][i]=0; else if(r>255) dst->data[2][j][i]=255; else dst->data[2][j][i] = r;
            if(g<0) dst->data[1][j][i]=0; else if(g>255) dst->data[1][j][i]=255; else dst->data[1][j][i] = g;
            if(b<0) dst->data[0][j][i]=0; else if(b>255) dst->data[0][j][i]=255; else dst->data[0][j][i] = b;
            /* second pixel: new luma, u taken from the NEXT group, v reused.
               NOTE(review): on the last pair of a row this reads sdata[j][2*i+4],
               one byte past the packed row of 2*dst->width bytes -- looks like an
               off-the-end read; verify source rows carry padding/border bytes. */
            y=sdata[j][i+i+3];u=sdata[j][i+i+4];
            r = y + v_to_r[v];g = y - u_to_g[u] - v_to_g[v];b = y + u_to_b[u];
            if(r<0) dst->data[2][j][i+1]=0; else if(r>255) dst->data[2][j][i+1]=255; else dst->data[2][j][i+1] = r;
            if(g<0) dst->data[1][j][i+1]=0; else if(g>255) dst->data[1][j][i+1]=255; else dst->data[1][j][i+1] = g;
            if(b<0) dst->data[0][j][i+1]=0; else if(b>255) dst->data[0][j][i+1]=255; else dst->data[0][j][i+1] = b;
        }
    }
    *(ImageType(dst))=(dst->channel==3)?MORN_IMAGE_RGB:MORN_IMAGE_RGBA;
}

/* m_ImageYUVToGray: keep only the Y (luma) plane of a YUV image as a 1-channel
 * gray image. Operates in place when dst is invalid/NULL or equal to src. */
void m_ImageYUVToGray(MImage *src,MImage *dst)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    mException((src->channel<3),EXIT,"invalid input");
    if(INVALID_POINTER(dst)) dst = src;
    if(dst!=src) mImageRedefine(dst,1,src->height,src->width,dst->data);
    if(!INVALID_POINTER(src->border)) dst->border = src->border;
    int j;
    for(j=ImageY1(dst);j<ImageY2(dst);j++)
        /* copy one luma row into the gray destination.
           NOTE(review): the destination pointer is offset by ImageX1(dst,j) but
           the source pointer is not, and the length mixes ImageX2(src,j) with
           ImageX1(dst,j) -- confirm src/dst row geometry is identical here,
           otherwise this copies from the wrong column. */
        memcpy(dst->data[0][j]+ImageX1(dst,j),src->data[0][j],(ImageX2(src,j)-ImageX1(dst,j))*sizeof(unsigned char));
    *(ImageType(dst))=MORN_IMAGE_GRAY;
    dst->channel = 1;
}

/* m_ImageRGBToYUV: convert a BGR image (data[0]=B, data[1]=G, data[2]=R) to YUV.
 * Y is the sum of the three per-channel lookup tables (r_to_y/g_to_y/b_to_y);
 * U and V come from the 512-entry bias tables indexed with 256+(b-y) and
 * 256+(r-y), so negative differences map into the valid index range. */
void m_ImageRGBToYUV(MImage *src,MImage *dst)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    mException((src->channel<3),EXIT,"invalid input");
    if(dst==NULL) dst = src;
    if(dst!=src) mImageRedefine(dst,3,src->height,src->width,dst->data);
    if(!INVALID_POINTER(src->border)) dst->border = src->border;
    int j;
    for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
    {
        unsigned char b = src->data[0][j][i];
        unsigned char g = src->data[1][j][i];
        unsigned char r = src->data[2][j][i];
        unsigned char y = r_to_y[r] + g_to_y[g] + b_to_y[b];
        dst->data[0][j][i] = y;
        dst->data[1][j][i] = b_to_u[256+b-y];
        dst->data[2][j][i] = r_to_v[256+r-y];
    }
    *ImageType(dst)=MORN_IMAGE_YUV;
}

/* m_ImageRGBToGray: reduce a BGR image to 1-channel luma using the same
 * r_to_y/g_to_y/b_to_y tables as m_ImageRGBToYUV. In place when dst==NULL. */
void m_ImageRGBToGray(MImage *src,MImage *dst)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    mException((src->channel<3),EXIT,"invalid input");
    if(dst==NULL) dst = src;
    if(dst!=src) mImageRedefine(dst,1,src->height,src->width,dst->data);
    if(!INVALID_POINTER(src->border)) dst->border = src->border;
    int j;
    for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
    {
        unsigned char b = src->data[0][j][i];
        unsigned char g = src->data[1][j][i];
        unsigned char r = src->data[2][j][i];
        dst->data[0][j][i] = r_to_y[r] + g_to_y[g] + b_to_y[b];
    }
    *ImageType(dst)=MORN_IMAGE_GRAY;
    dst->channel = 1;
}

/* m_ImageToGray: dispatch to the proper gray conversion based on the image's
 * type tag (GRAY is copied as-is; RGB/RGBA and YUV use their converters). */
void m_ImageToGray(MImage *src,MImage *dst)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    int *image_type = ImageType(src);
    if(*image_type == MORN_IMAGE_GRAY) mImageCopy(src,dst);
    else if((*image_type == MORN_IMAGE_RGB)||(*image_type == MORN_IMAGE_RGBA)) m_ImageRGBToGray(src,dst);
    else if(*image_type == MORN_IMAGE_YUV) m_ImageYUVToGray(src,dst);
    else mException(1,EXIT,"invalid image type %d",*image_type);
}

/* m_ImageSaturation: compute a 1-channel HSV-style saturation map (0..240)
 * from a BGR image; body continues on the next line. */
void m_ImageSaturation(MImage *src,MImage *dst) {
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    mException((src->channel<3),EXIT,"invalid input");
    if(dst==NULL) dst = src;
    /* NOTE(review): unlike every sibling conversion, this call passes no
       dst->data argument and is not guarded by dst!=src -- confirm against
       mImageRedefine's signature (it may be a variadic macro); if not, this is
       an inconsistent call. */
    mImageRedefine(dst,1,src->height,src->width);
    if(!INVALID_POINTER(src->border)) dst->border = src->border;
    int j;
    /* note: iteration bounds use src (not dst) here */
    for(j=ImageY1(src);j<ImageY2(src);j++)for(int i=ImageX1(src,j);i<ImageX2(src,j);i++)
    {
        unsigned char b = src->data[0][j][i];
        unsigned char g = src->data[1][j][i];
        unsigned char r = src->data[2][j][i];
        int max,min;
        if(r>g) {max=r; min=g;} else {max=g; min=r;}
        if(b>max) {max=b;} else if(b<min) {min=b;}
        /* saturation = (max-min)/max, scaled to 0..240; 0 for black pixels */
        dst->data[0][j][i] = (max==0)?0:(((max-min)*240)/max);
    }
    *ImageType(dst)=MORN_IMAGE_GRAY;
}

/* m_ImageRGBToHSV: convert a BGR image to HSV stored as data[0]=H, data[1]=S,
 * data[2]=V. Hue uses a 0..240 scale (40 units per 60-degree sextant);
 * saturation and value are also scaled to 0..240. Black pixels (max==0) map
 * to (0,0,0). In place when dst==NULL. */
void m_ImageRGBToHSV(MImage *src,MImage *dst)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    mException((src->channel<3),EXIT,"invalid input");
    if(dst==NULL) dst = src;
    if(dst!=src) mImageRedefine(dst,3,src->height,src->width,dst->data);
    if(!INVALID_POINTER(src->border)) dst->border = src->border;
    int j;
    for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
    {
        unsigned char b = src->data[0][j][i];
        unsigned char g = src->data[1][j][i];
        unsigned char r = src->data[2][j][i];
        int max,min;
        if(r>g) {max=r; min=g;} else {max=g; min=r;}
        if(b>max) {max=b;} else if(b<min) {min=b;}
        if(max==0)
        {
            dst->data[0][j][i]=0;dst->data[1][j][i]=0;dst->data[2][j][i]=0;
            continue;
        }
        int value = max-min;
        dst->data[2][j][i] = max*240/256;      /* V */
        dst->data[1][j][i] = (value*240)/max;  /* S */
        /* H: sextant selected by which channel is max (and min for the red case) */
        if(value==0) dst->data[0][j][i]=0;
        else if(max==r)
        {
            if(min==b) dst->data[0][j][i]= ((g-b)*40)/value;
            else dst->data[0][j][i]=240+((g-b)*40)/value;
        }
        else if(max==g) dst->data[0][j][i]= 80+((b-r)*40)/value;
        else if(max==b) dst->data[0][j][i]=160+((r-g)*40)/value;
    }
    *ImageType(dst)=MORN_IMAGE_HSV;
}

/* m_ImageHSVToRGB: inverse of m_ImageRGBToHSV (0..240 scale); body continues
 * on the next line. */
void m_ImageHSVToRGB(MImage *src,MImage *dst)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    mException((src->channel<3),EXIT,"invalid input");
    if(dst==NULL) dst = src;
    if(dst!=src) mImageRedefine(dst,MAX(3,dst->channel),src->height,src->width,dst->data);
    /* continuation of m_ImageHSVToRGB(): reconstruct B,G,R from the 0..240
       scaled H,S,V channels produced by m_ImageRGBToHSV. */
    if(!INVALID_POINTER(src->border)) dst->border = src->border;
    int j;
    for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
    {
        unsigned char h = src->data[0][j][i];
        unsigned char s = src->data[1][j][i];
        unsigned char v = src->data[2][j][i];
        int max = v*256/240;        /* undo the V scaling */
        int value = (max*s/240);    /* chroma */
        int min = max-value;
        unsigned char r,g,b;
        /* pick the sextant (40 hue units each) and ramp the middle channel */
        if(h< 40) {r=max;b=min;g=min+( h *value/40);}
        else if(h< 80) {g=max;b=min;r=min+(( 80-h)*value/40);}
        else if(h<120) {g=max;r=min;b=min+((h- 80)*value/40);}
        else if(h<160) {b=max;r=min;g=min+((160-h)*value/40);}
        else if(h<200) {b=max;g=min;r=min+((h-160)*value/40);}
        else {r=max;g=min;b=min+((240-h)*value/40);}
        dst->data[0][j][i] = b;
        dst->data[1][j][i] = g;
        dst->data[2][j][i] = r;
    }
    *ImageType(dst)=(dst->channel==3)?MORN_IMAGE_RGB:MORN_IMAGE_RGBA;
}

/* commented-out experimental color-distance routine, kept for reference */
/*
void mColorDiff(MImage *src,MImage *dst,unsigned char *color)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    int height = src->height;
    int width = src->width;
    unsigned char ***data = src->data;
    if(src->info.image_type==MORN_IMAGE_RGB)
    {
        mException((src->channel!=3),EXIT,"invalid input");
        float r0=color[2];float g0=color[1];float b0=color[0];
        printf("r0 is %f,g0 is %f,b0 is %f\n",r0,g0,b0);
        for(int j=0;j<height;j++)
            for(int i=0;i<width;i++)
            {
                float r = data[2][j][i];float g = data[1][j][i];float b = data[0][j][i];
                // float diff = (ABS(r*g0-r0*g)/(r*g)+ABS(g*b0-g0*b)/(g*b)+ABS(b*r0-b0*g))/(b*r);
                // diff = diff*1024;
                // float diff = sqrt((r-r0)*(r-r0)+(g-g0)*(g-g0)+(b-b0)*(b-b0));
                float diff = MAX(MAX(ABS(r-r0),ABS(b-b0)),ABS(g-g0));
                dst->data[0][j][i] = (diff>255)?255:diff;
            }
    }
    else mException(1,EXIT,"invalid input");
}
*/
/* per-channel lookup tables feeding mImageRGBToLAB below: r2l/g2l/b2l sum to
   the L channel; r2a/b2a and r2b/g2b feed the a and b channels */
unsigned char
r2l[256]={0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,7,8,8,8,8,9,9,9,9,9,10,10,10,10,10,11,11,11,11,11,12,12,12,12,13,13,13,13,13,14,14,14,14,14,15,15,15,15,16,16,16,16,16,17,17,17,17,17,18,18,18,18,18,19,19,19,19,20,20,20,20,20,21,21,21,21,21,22,22,22,22,23,23,23,23,23,24,24,24,24,24,25,25,25,25,26,26,26,26,26,27,27,27,27,27,28,28,28,28,28,29,29,29,29,30,30,30,30,30,31,31,31,31,31,32,32,32,32,33,33,33,33,33,34,34,34,34,34,35,35,35,35,36,36,36,36,36,37,37,37,37,37,38,38,38,38,38,39,39,39,39,40,40,40,40,40,41,41,41,41,41,42,42,42,42,43,43,43,43,43,44,44,44,44,44,45,45,45,45,45,46,46,46,46,47,47,47,47,47,48,48,48,48,48,49,49,49,49,50,50,50,50,50,51,51,51,51,51,52,52,52,52,53,53,53,53,53,54,54,54,54}; unsigned char g2l[256]={0,1,1,2,3,4,4,5,6,6,7,8,9,9,10,11,11,12,13,14,14,15,16,16,17,18,19,19,20,21,21,22,23,24,24,25,26,26,27,28,29,29,30,31,31,32,33,34,34,35,36,36,37,38,39,39,40,41,41,42,43,44,44,45,46,46,47,48,49,49,50,51,51,52,53,54,54,55,56,57,57,58,59,59,60,61,62,62,63,64,64,65,66,67,67,68,69,69,70,71,72,72,73,74,74,75,76,77,77,78,79,79,80,81,82,82,83,84,84,85,86,87,87,88,89,89,90,91,92,92,93,94,94,95,96,97,97,98,99,99,100,101,102,102,103,104,104,105,106,107,107,108,109,109,110,111,112,112,113,114,114,115,116,117,117,118,119,119,120,121,122,122,123,124,124,125,126,127,127,128,129,129,130,131,132,132,133,134,134,135,136,137,137,138,139,139,140,141,142,142,143,144,144,145,146,147,147,148,149,149,150,151,152,152,153,154,154,155,156,157,157,158,159,159,160,161,162,162,163,164,164,165,166,167,167,168,169,170,170,171,172,172,173,174,175,175,176,177,177,178,179,180,180,181,182,182}; unsigned char 
b2l[256]={0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,18,18,18}; unsigned char r2a[256]={0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7,8,8,8,8,9,9,9,10,10,10,11,11,11,12,12,12,13,13,13,14,14,14,15,15,15,16,16,16,17,17,17,18,18,18,19,19,19,20,20,20,21,21,21,22,22,22,23,23,23,24,24,24,24,25,25,25,26,26,26,27,27,27,28,28,28,29,29,29,30,30,30,31,31,31,32,32,32,33,33,33,34,34,34,35,35,35,36,36,36,37,37,37,38,38,38,39,39,39,39,40,40,40,41,41,41,42,42,42,43,43,43,44,44,44,45,45,45,46,46,46,47,47,47,48,48,48,49,49,49,50,50,50,51,51,51,52,52,52,53,53,53,54,54,54,55,55,55,55,56,56,56,57,57,57,58,58,58,59,59,59,60,60,60,61,61,61,62,62,62,63,63,63,64,64,64,65,65,65,66,66,66,67,67,67,68,68,68,69,69,69,70,70,70,71,71,71,71,72,72,72,73,73,73,74,74,74,75,75,75,76,76,76,77,77,77,78,78,78,79,79,79,80,80,80,81,81,81,82,82,82,83,83,83}; unsigned char 
b2a[256]={0,0,0,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,4,4,4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,7,7,8,8,8,8,8,9,9,9,9,9,9,10,10,10,10,10,10,11,11,11,11,11,11,12,12,12,12,12,12,13,13,13,13,13,14,14,14,14,14,14,15,15,15,15,15,15,16,16,16,16,16,16,17,17,17,17,17,18,18,18,18,18,18,19,19,19,19,19,19,20,20,20,20,20,20,21,21,21,21,21,22,22,22,22,22,22,23,23,23,23,23,23,24,24,24,24,24,24,25,25,25,25,25,26,26,26,26,26,26,27,27,27,27,27,27,28,28,28,28,28,28,29,29,29,29,29,30,30,30,30,30,30,31,31,31,31,31,31,32,32,32,32,32,32,33,33,33,33,33,34,34,34,34,34,34,35,35,35,35,35,35,36,36,36,36,36,36,37,37,37,37,37,37,38,38,38,38,38,39,39,39,39,39,39,40,40,40,40,40,40,41,41,41,41,41,41,42,42,42,42,42,43,43,43,43,43,43,44,44,44,44,44}; unsigned char r2b[256]={0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,20,20,20,20,20,20,20,20,21,21,21,21,21,21,21,21,22,22,22,22,22,22,22,22,23,23,23,23,23,23,23,23,23,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,30,30,30,30,30,30,30,30,31,31,31,31,31}; unsigned char 
/* g2b[g]: green-channel contribution to the Lab b term */ g2b[256]={0,0,1,1,2,2,2,3,3,3,4,4,5,5,5,6,6,6,7,7,8,8,8,9,9,9,10,10,11,11,11,12,12,12,13,13,14,14,14,15,15,16,16,16,17,17,17,18,18,19,19,19,20,20,20,21,21,22,22,22,23,23,23,24,24,25,25,25,26,26,26,27,27,28,28,28,29,29,30,30,30,31,31,31,32,32,33,33,33,34,34,34,35,35,36,36,36,37,37,37,38,38,39,39,39,40,40,40,41,41,42,42,42,43,43,44,44,44,45,45,45,46,46,47,47,47,48,48,48,49,49,50,50,50,51,51,51,52,52,53,53,53,54,54,54,55,55,56,56,56,57,57,57,58,58,59,59,59,60,60,61,61,61,62,62,62,63,63,64,64,64,65,65,65,66,66,67,67,67,68,68,68,69,69,70,70,70,71,71,71,72,72,73,73,73,74,74,75,75,75,76,76,76,77,77,78,78,78,79,79,79,80,80,81,81,81,82,82,82,83,83,84,84,84,85,85,85,86,86,87,87,87,88,88,89,89,89,90,90,90,91,91,92,92,92,93,93,93,94,94,95,95,95,96,96,96};
/* mImageRGBToLAB: convert a BGR image to a byte-packed Lab-like representation.
 *   data[0] <- L = r2l[r] + g2l[g] + b2l[b]
 *   data[1] <- a, data[2] <- b, each built from the lookup tables above,
 *   scaled by 2.55 in float and clamped to [0,255].
 * In place when dst==NULL.
 * NOTE(review): name uses the "mImage" prefix while siblings use "m_Image" --
 * inconsistent, but kept since external callers depend on it. */
void mImageRGBToLAB(MImage *src,MImage *dst)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    mException((src->channel<3),EXIT,"invalid input");
    if(dst==NULL) dst = src;
    if(dst!=src) mImageRedefine(dst,3,src->height,src->width,dst->data);
    if(!INVALID_POINTER(src->border)) dst->border = src->border;
    int j;
    for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
    {
        unsigned char b = src->data[0][j][i];
        unsigned char g = src->data[1][j][i];
        unsigned char r = src->data[2][j][i];
        dst->data[0][j][i] = r2l[r] + g2l[g] + b2l[b];
        /* a/b terms: integer table sums scaled by a float factor, then clamped */
        int aa = (r2a[r] - (g>>2) + b2a[b])*2.55;if(aa>255)aa=255;else if(aa<0)aa=0;
        int bb = (r2b[r] + g2b[g] - (b>>2))*2.55;if(bb>255)bb=255;else if(bb<0)bb=0;
        dst->data[1][j][i]=aa;
        dst->data[2][j][i]=bb;
    }
    *ImageType(dst)=MORN_IMAGE_LAB;
}
OnDiscMSExperiment.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2020. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #pragma once #include <OpenMS/INTERFACES/DataStructures.h> #include <OpenMS/KERNEL/MSExperiment.h> #include <OpenMS/KERNEL/MSSpectrum.h> #include <OpenMS/KERNEL/MSChromatogram.h> #include <OpenMS/METADATA/ExperimentalSettings.h> #include <OpenMS/FORMAT/HANDLERS/IndexedMzMLHandler.h> #include <vector> #include <algorithm> #include <limits> #include <boost/shared_ptr.hpp> namespace OpenMS { /** @brief Representation of a mass spectrometry experiment on disk. @ingroup Kernel @note This implementation is @a not thread-safe since it keeps internally a single file access pointer which it moves when accessing a specific data item. Please provide a separate copy to each thread, e.g. @code #pragma omp parallel for firstprivate(ondisc_map) @endcode */ class OPENMS_DLLAPI OnDiscMSExperiment { typedef ChromatogramPeak ChromatogramPeakT; typedef Peak1D PeakT; public: /** @brief Constructor This initializes the object, use openFile to open a file. */ OnDiscMSExperiment() {} /** @brief Open a specific file on disk. This tries to read the indexed mzML by parsing the index and then reading the meta information into memory. 
@return Whether the parsing of the file was successful (if false, the file most likely was not an indexed mzML file) */ bool openFile(const String& filename, bool skipMetaData = false) { filename_ = filename; indexed_mzml_file_.openFile(filename); if (filename != "" && !skipMetaData) { loadMetaData_(filename); } return indexed_mzml_file_.getParsingSuccess(); } /// Copy constructor OnDiscMSExperiment(const OnDiscMSExperiment& source) : filename_(source.filename_), indexed_mzml_file_(source.indexed_mzml_file_), meta_ms_experiment_(source.meta_ms_experiment_) { } /** @brief Equality operator This only checks whether the underlying file is the same and the parsed meta-information is the same. Note that the file reader (e.g. the std::ifstream of the file) might be in a different state. */ bool operator==(const OnDiscMSExperiment& rhs) const { if (meta_ms_experiment_ == nullptr || rhs.meta_ms_experiment_ == nullptr) { return filename_ == rhs.filename_ && meta_ms_experiment_ == rhs.meta_ms_experiment_; } // check if file and meta information is the same return filename_ == rhs.filename_ && (*meta_ms_experiment_) == (*rhs.meta_ms_experiment_); // do not check if indexed_mzml_file_ is equal -> they have the same filename... } /// Inequality operator bool operator!=(const OnDiscMSExperiment& rhs) const { return !(operator==(rhs)); } /** @brief Checks if all spectra are sorted with respect to ascending RT Note that we cannot check whether all spectra are sorted (except if we were to load them all and check). 
*/ bool isSortedByRT() const { if (!meta_ms_experiment_) return false; return meta_ms_experiment_->isSorted(false); } /// alias for getNrSpectra inline Size size() const { return getNrSpectra(); } /// returns whether spectra are empty inline bool empty() const { return indexed_mzml_file_.getNrSpectra() == 0; } /// get the total number of spectra available inline Size getNrSpectra() const { return indexed_mzml_file_.getNrSpectra(); } /// get the total number of chromatograms available inline Size getNrChromatograms() const { return indexed_mzml_file_.getNrChromatograms(); } /// returns the meta information of this experiment (const access) boost::shared_ptr<const ExperimentalSettings> getExperimentalSettings() const { return boost::static_pointer_cast<const ExperimentalSettings>(meta_ms_experiment_); } boost::shared_ptr<PeakMap> getMetaData() const { return meta_ms_experiment_; } /// alias for getSpectrum inline MSSpectrum operator[](Size n) { return getSpectrum(n); } /** @brief returns a single spectrum @param id The index of the spectrum */ MSSpectrum getSpectrum(Size id) { if (!meta_ms_experiment_) return indexed_mzml_file_.getMSSpectrumById(int(id)); MSSpectrum spectrum(meta_ms_experiment_->operator[](id)); indexed_mzml_file_.getMSSpectrumById(int(id), spectrum); return spectrum; } /** @brief returns a single spectrum */ OpenMS::Interfaces::SpectrumPtr getSpectrumById(Size id) { return indexed_mzml_file_.getSpectrumById(id); } /** @brief returns a single chromatogram @param id The index of the chromatogram */ MSChromatogram getChromatogram(Size id) { if (!meta_ms_experiment_) return indexed_mzml_file_.getMSChromatogramById(int(id)); MSChromatogram chromatogram(meta_ms_experiment_->getChromatogram(id)); indexed_mzml_file_.getMSChromatogramById(int(id), chromatogram); return chromatogram; } /** @brief returns a single chromatogram @param id The native identifier of the chromatogram */ MSChromatogram getChromatogramByNativeId(const std::string& id); /** @brief 
returns a single spectrum @param id The native identifier of the spectrum */ MSSpectrum getSpectrumByNativeId(const std::string& id); /** @brief returns a single chromatogram */ OpenMS::Interfaces::ChromatogramPtr getChromatogramById(Size id) { return indexed_mzml_file_.getChromatogramById(id); } /// sets whether to skip some XML checks and be fast instead void setSkipXMLChecks(bool skip) { indexed_mzml_file_.setSkipXMLChecks(skip); } private: /// Private Assignment operator -> we cannot copy file streams in IndexedMzMLHandler OnDiscMSExperiment& operator=(const OnDiscMSExperiment& /* source */); void loadMetaData_(const String& filename); MSChromatogram getMetaChromatogramById_(const std::string& id); MSSpectrum getMetaSpectrumById_(const std::string& id); protected: /// The filename of the underlying data file String filename_; /// The index of the underlying data file Internal::IndexedMzMLHandler indexed_mzml_file_; /// The meta-data boost::shared_ptr<PeakMap> meta_ms_experiment_; /// Mapping of chromatogram native ids to offsets std::unordered_map< std::string, Size > chromatograms_native_ids_; /// Mapping of spectra native ids to offsets std::unordered_map< std::string, Size > spectra_native_ids_; }; typedef OpenMS::OnDiscMSExperiment OnDiscPeakMap; } // namespace OpenMS
convolutiondepthwise_3x3_pack8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); __m256 _bias0 = bias ? 
_mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); float* outptr1 = out.row(1); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00 = _mm256_loadu_ps(k0); __m256 _k01 = _mm256_loadu_ps(k0 + 8); __m256 _k02 = _mm256_loadu_ps(k0 + 16); __m256 _k10 = _mm256_loadu_ps(k0 + 24); __m256 _k11 = _mm256_loadu_ps(k0 + 32); __m256 _k12 = _mm256_loadu_ps(k0 + 40); __m256 _k20 = _mm256_loadu_ps(k0 + 48); __m256 _k21 = _mm256_loadu_ps(k0 + 56); __m256 _k22 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1); _sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1); _sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1); _sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1); _sum1 = _mm256_fmadd_ps(_k21, 
_r22, _sum1); _sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1); __m256 _sum2 = _bias0; __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_fmadd_ps(_k00, _r02, _sum2); _sum2 = _mm256_fmadd_ps(_k01, _r03, _sum2); _sum2 = _mm256_fmadd_ps(_k02, _r04, _sum2); _sum2 = _mm256_fmadd_ps(_k10, _r12, _sum2); _sum2 = _mm256_fmadd_ps(_k11, _r13, _sum2); _sum2 = _mm256_fmadd_ps(_k12, _r14, _sum2); _sum2 = _mm256_fmadd_ps(_k20, _r22, _sum2); _sum2 = _mm256_fmadd_ps(_k21, _r23, _sum2); _sum2 = _mm256_fmadd_ps(_k22, _r24, _sum2); __m256 _sum3 = _bias0; __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r15 = _mm256_loadu_ps(r1 + 40); __m256 _r25 = _mm256_loadu_ps(r2 + 40); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_fmadd_ps(_k00, _r03, _sum3); _sum3 = _mm256_fmadd_ps(_k01, _r04, _sum3); _sum3 = _mm256_fmadd_ps(_k02, _r05, _sum3); _sum3 = _mm256_fmadd_ps(_k10, _r13, _sum3); _sum3 = _mm256_fmadd_ps(_k11, _r14, _sum3); _sum3 = _mm256_fmadd_ps(_k12, _r15, _sum3); _sum3 = _mm256_fmadd_ps(_k20, _r23, _sum3); _sum3 = _mm256_fmadd_ps(_k21, _r24, _sum3); _sum3 = _mm256_fmadd_ps(_k22, _r25, _sum3); __m256 _sum4 = _bias0; __m256 _r06 = _mm256_loadu_ps(r0 + 48); __m256 _r16 = _mm256_loadu_ps(r1 + 48); __m256 _r26 = _mm256_loadu_ps(r2 + 48); _mm256_storeu_ps(outptr0 + 24, _sum3); _sum4 = _mm256_fmadd_ps(_k00, _r04, _sum4); _sum4 = _mm256_fmadd_ps(_k01, _r05, _sum4); _sum4 = _mm256_fmadd_ps(_k02, _r06, _sum4); _sum4 = _mm256_fmadd_ps(_k10, _r14, _sum4); _sum4 = _mm256_fmadd_ps(_k11, _r15, _sum4); _sum4 = _mm256_fmadd_ps(_k12, _r16, _sum4); _sum4 = _mm256_fmadd_ps(_k20, _r24, _sum4); _sum4 = _mm256_fmadd_ps(_k21, _r25, _sum4); _sum4 = _mm256_fmadd_ps(_k22, _r26, _sum4); __m256 _sum5 = _bias0; __m256 _r07 = _mm256_loadu_ps(r0 + 56); __m256 _r17 = _mm256_loadu_ps(r1 + 56); __m256 _r27 = _mm256_loadu_ps(r2 + 56); _mm256_storeu_ps(outptr0 + 32, _sum4); _sum5 = 
_mm256_fmadd_ps(_k00, _r05, _sum5); _sum5 = _mm256_fmadd_ps(_k01, _r06, _sum5); _sum5 = _mm256_fmadd_ps(_k02, _r07, _sum5); _sum5 = _mm256_fmadd_ps(_k10, _r15, _sum5); _sum5 = _mm256_fmadd_ps(_k11, _r16, _sum5); _sum5 = _mm256_fmadd_ps(_k12, _r17, _sum5); _sum5 = _mm256_fmadd_ps(_k20, _r25, _sum5); _sum5 = _mm256_fmadd_ps(_k21, _r26, _sum5); _sum5 = _mm256_fmadd_ps(_k22, _r27, _sum5); __m256 _sum6 = _bias0; __m256 _r08 = _mm256_loadu_ps(r0 + 64); __m256 _r18 = _mm256_loadu_ps(r1 + 64); __m256 _r28 = _mm256_loadu_ps(r2 + 64); _mm256_storeu_ps(outptr0 + 40, _sum5); _sum6 = _mm256_fmadd_ps(_k00, _r06, _sum6); _sum6 = _mm256_fmadd_ps(_k01, _r07, _sum6); _sum6 = _mm256_fmadd_ps(_k02, _r08, _sum6); _sum6 = _mm256_fmadd_ps(_k10, _r16, _sum6); _sum6 = _mm256_fmadd_ps(_k11, _r17, _sum6); _sum6 = _mm256_fmadd_ps(_k12, _r18, _sum6); _sum6 = _mm256_fmadd_ps(_k20, _r26, _sum6); _sum6 = _mm256_fmadd_ps(_k21, _r27, _sum6); _sum6 = _mm256_fmadd_ps(_k22, _r28, _sum6); __m256 _sum7 = _bias0; __m256 _r09 = _mm256_loadu_ps(r0 + 72); __m256 _r19 = _mm256_loadu_ps(r1 + 72); __m256 _r29 = _mm256_loadu_ps(r2 + 72); _mm256_storeu_ps(outptr0 + 48, _sum6); _sum7 = _mm256_fmadd_ps(_k00, _r07, _sum7); _sum7 = _mm256_fmadd_ps(_k01, _r08, _sum7); _sum7 = _mm256_fmadd_ps(_k02, _r09, _sum7); _sum7 = _mm256_fmadd_ps(_k10, _r17, _sum7); _sum7 = _mm256_fmadd_ps(_k11, _r18, _sum7); _sum7 = _mm256_fmadd_ps(_k12, _r19, _sum7); _sum7 = _mm256_fmadd_ps(_k20, _r27, _sum7); _sum7 = _mm256_fmadd_ps(_k21, _r28, _sum7); _sum7 = _mm256_fmadd_ps(_k22, _r29, _sum7); _mm256_storeu_ps(outptr0 + 56, _sum7); r0 += 64; r1 += 64; r2 += 64; outptr0 += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = 
_mm256_loadu_ps(r2 + 16); _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1); _sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1); _sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1); _sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1); _sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1); _sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1); __m256 _sum2 = _bias0; __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_fmadd_ps(_k00, _r02, _sum2); _sum2 = _mm256_fmadd_ps(_k01, _r03, _sum2); _sum2 = _mm256_fmadd_ps(_k02, _r04, _sum2); _sum2 = _mm256_fmadd_ps(_k10, _r12, _sum2); _sum2 = _mm256_fmadd_ps(_k11, _r13, _sum2); _sum2 = _mm256_fmadd_ps(_k12, _r14, _sum2); _sum2 = _mm256_fmadd_ps(_k20, _r22, _sum2); _sum2 = _mm256_fmadd_ps(_k21, _r23, _sum2); _sum2 = _mm256_fmadd_ps(_k22, _r24, _sum2); __m256 _sum3 = _bias0; __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r15 = _mm256_loadu_ps(r1 + 40); __m256 _r25 = _mm256_loadu_ps(r2 + 40); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_fmadd_ps(_k00, _r03, _sum3); _sum3 = _mm256_fmadd_ps(_k01, _r04, _sum3); _sum3 = _mm256_fmadd_ps(_k02, _r05, _sum3); _sum3 = _mm256_fmadd_ps(_k10, _r13, _sum3); _sum3 = _mm256_fmadd_ps(_k11, _r14, _sum3); _sum3 = 
_mm256_fmadd_ps(_k12, _r15, _sum3); _sum3 = _mm256_fmadd_ps(_k20, _r23, _sum3); _sum3 = _mm256_fmadd_ps(_k21, _r24, _sum3); _sum3 = _mm256_fmadd_ps(_k22, _r25, _sum3); _mm256_storeu_ps(outptr0 + 24, _sum3); r0 += 32; r1 += 32; r2 += 32; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1); _sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1); _sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1); _sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1); _sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1); _sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1); _mm256_storeu_ps(outptr0 + 8, _sum1); r0 += 16; r1 += 16; r2 += 16; outptr0 += 16; } for (; j < outw; j++) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = 
_mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0); _mm256_storeu_ps(outptr0, _sum0); r0 += 8; r1 += 8; r2 += 8; outptr0 += 8; } r0 += 2 * 8; r1 += 2 * 8; r2 += 2 * 8; } } } static void convdw3x3s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 8; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); __m256 _bias0 = bias ? 
_mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); float* outptr1 = out.row(1); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00 = _mm256_loadu_ps(k0); __m256 _k01 = _mm256_loadu_ps(k0 + 8); __m256 _k02 = _mm256_loadu_ps(k0 + 16); __m256 _k10 = _mm256_loadu_ps(k0 + 24); __m256 _k11 = _mm256_loadu_ps(k0 + 32); __m256 _k12 = _mm256_loadu_ps(k0 + 40); __m256 _k20 = _mm256_loadu_ps(k0 + 48); __m256 _k21 = _mm256_loadu_ps(k0 + 56); __m256 _k22 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_fmadd_ps(_k00, _r02, _sum1); _sum1 = _mm256_fmadd_ps(_k01, _r03, _sum1); _sum1 = _mm256_fmadd_ps(_k02, _r04, _sum1); _sum1 = _mm256_fmadd_ps(_k10, _r12, _sum1); _sum1 = _mm256_fmadd_ps(_k11, _r13, 
_sum1); _sum1 = _mm256_fmadd_ps(_k12, _r14, _sum1); _sum1 = _mm256_fmadd_ps(_k20, _r22, _sum1); _sum1 = _mm256_fmadd_ps(_k21, _r23, _sum1); _sum1 = _mm256_fmadd_ps(_k22, _r24, _sum1); __m256 _sum2 = _bias0; __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r15 = _mm256_loadu_ps(r1 + 40); __m256 _r25 = _mm256_loadu_ps(r2 + 40); __m256 _r06 = _mm256_loadu_ps(r0 + 48); __m256 _r16 = _mm256_loadu_ps(r1 + 48); __m256 _r26 = _mm256_loadu_ps(r2 + 48); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_fmadd_ps(_k00, _r04, _sum2); _sum2 = _mm256_fmadd_ps(_k01, _r05, _sum2); _sum2 = _mm256_fmadd_ps(_k02, _r06, _sum2); _sum2 = _mm256_fmadd_ps(_k10, _r14, _sum2); _sum2 = _mm256_fmadd_ps(_k11, _r15, _sum2); _sum2 = _mm256_fmadd_ps(_k12, _r16, _sum2); _sum2 = _mm256_fmadd_ps(_k20, _r24, _sum2); _sum2 = _mm256_fmadd_ps(_k21, _r25, _sum2); _sum2 = _mm256_fmadd_ps(_k22, _r26, _sum2); __m256 _sum3 = _bias0; __m256 _r07 = _mm256_loadu_ps(r0 + 56); __m256 _r17 = _mm256_loadu_ps(r1 + 56); __m256 _r27 = _mm256_loadu_ps(r2 + 56); __m256 _r08 = _mm256_loadu_ps(r0 + 64); __m256 _r18 = _mm256_loadu_ps(r1 + 64); __m256 _r28 = _mm256_loadu_ps(r2 + 64); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_fmadd_ps(_k00, _r06, _sum3); _sum3 = _mm256_fmadd_ps(_k01, _r07, _sum3); _sum3 = _mm256_fmadd_ps(_k02, _r08, _sum3); _sum3 = _mm256_fmadd_ps(_k10, _r16, _sum3); _sum3 = _mm256_fmadd_ps(_k11, _r17, _sum3); _sum3 = _mm256_fmadd_ps(_k12, _r18, _sum3); _sum3 = _mm256_fmadd_ps(_k20, _r26, _sum3); _sum3 = _mm256_fmadd_ps(_k21, _r27, _sum3); _sum3 = _mm256_fmadd_ps(_k22, _r28, _sum3); _mm256_storeu_ps(outptr0 + 24, _sum3); r0 += 2 * 32; r1 += 2 * 32; r2 += 2 * 32; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); 
__m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_fmadd_ps(_k00, _r02, _sum1); _sum1 = _mm256_fmadd_ps(_k01, _r03, _sum1); _sum1 = _mm256_fmadd_ps(_k02, _r04, _sum1); _sum1 = _mm256_fmadd_ps(_k10, _r12, _sum1); _sum1 = _mm256_fmadd_ps(_k11, _r13, _sum1); _sum1 = _mm256_fmadd_ps(_k12, _r14, _sum1); _sum1 = _mm256_fmadd_ps(_k20, _r22, _sum1); _sum1 = _mm256_fmadd_ps(_k21, _r23, _sum1); _sum1 = _mm256_fmadd_ps(_k22, _r24, _sum1); _mm256_storeu_ps(outptr0 + 8, _sum1); r0 += 2 * 16; r1 += 2 * 16; r2 += 2 * 16; outptr0 += 16; } for (; j < outw; j++) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_fmadd_ps(_k21, _r21, 
_sum0); _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0); _mm256_storeu_ps(outptr0, _sum0); r0 += 2 * 8; r1 += 2 * 8; r2 += 2 * 8; outptr0 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
softmax.c
/******************************************************************************* * Copyright 2017-2018 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #include <stdlib.h> #include "MKLDNN.h" #include "omp.h" #include <math.h> void SoftmaxNCHW(unsigned long long input, int N, long long len) { float* inPtr = (float*)input; #pragma omp parallel for for (int i = 0; i < N; ++i) { float *pTemp = inPtr + i * len; float pMax = pTemp[0]; for(long long j = 0; j < len; ++j) { if (pMax < pTemp[j]) { pMax = pTemp[j]; } } float pSum = 0.0f; for(long long j=0; j<len; ++j) { pTemp[j] = exp(pTemp[j] - pMax); pSum += pTemp[j]; } for(long long j=0; j < len; ++j) { pTemp[j] = pTemp[j] / pSum; } } } void SoftmaxCHWN(unsigned long long input, int N, long long len) { float* inPtr = (float*)input; #pragma omp parallel for for (int i = 0; i < N; ++i) { float *pTemp = inPtr + i; float pMax = pTemp[0]; for (long long j = 0; j < len; ++j) { if ( pMax<pTemp[j*N]) pMax = pTemp[j*N]; } float pSum = 0.0f; for (long long j = 0; j < len; ++j) { pTemp[j*N] = exp(pTemp[j*N] - pMax); pSum += pTemp[j*N]; } for(long long j = 0; j < len; ++j) { pTemp[j*N] = pTemp[j*N] / pSum; } } }
zSchCompUdt-2Ddynamic.c
/*! @file
 * \brief This file contains the main loop of pzgstrf (doublecomplex variant)
 * which involves rank-k update of the Schur complement.
 * Uses 2D partitioning for the scatter phase.
 *
 * NOTE(review): this is an #include-style code fragment, not a standalone
 * translation unit -- it reads and writes many variables (msg0, lptr, lsub,
 * Ublock_info, ...) declared in the enclosing factorization routine.
 *
 * <pre>
 * -- Distributed SuperLU routine (version 4.1) --
 * Lawrence Berkeley National Lab, Univ. of California Berkeley.
 * October 1, 2014
 *
 */

#define SCHEDULE_STRATEGY guided

double tt_start;
double tt_end;

if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */
    int cum_nrow=0;
    int temp_nbrow;

    lptr = lptr0;
    luptr = luptr0;

    /**
     * Separating L blocks into the "look-ahead" window (to be factored soon)
     * and the "remaining" set; each side records cumulative row counts.
     */
    int lookAheadBlk=0, RemainBlk=0;

    tt_start = SuperLU_timer_();

    for (int i = 0; i < nlb; ++i) {
        ib = lsub[lptr];            /* Row block L(i,k). */
        temp_nbrow = lsub[lptr+1];  /* Number of full rows. */

        /* look_up_flag==0 iff ib is one of the next few panels to be
         * factored (the look-ahead window of size num_look_aheads). */
        int look_up_flag=1;
        for (int j = k0+1; j < SUPERLU_MIN (k0 + num_look_aheads+2, nsupers ); ++j) {
            if(ib == perm_c_supno[j]) look_up_flag=0;
        }

        if(!look_up_flag) {
            /* ib is within look up window */
            /* lookAheadFullRow[] holds a running (inclusive) prefix sum of
             * row counts over the look-ahead blocks. */
            if (lookAheadBlk==0) {
                lookAheadFullRow[lookAheadBlk] = temp_nbrow;
            } else {
                lookAheadFullRow[lookAheadBlk] = temp_nbrow+lookAheadFullRow[lookAheadBlk-1];
            }
            lookAheadStRow[lookAheadBlk] = cum_nrow;
            lookAhead_lptr[lookAheadBlk] = lptr;
            lookAhead_ib[lookAheadBlk] = ib;
            lookAheadBlk++;
        } else {
            /* ib is not in look up window */
            if (RemainBlk==0) {
                Remain_info[RemainBlk].FullRow = temp_nbrow;
            } else {
                Remain_info[RemainBlk].FullRow = temp_nbrow+Remain_info[RemainBlk-1].FullRow;
            }
            RemainStRow[RemainBlk] = cum_nrow;
            // Remain_lptr[RemainBlk] = lptr;
            Remain_info[RemainBlk].lptr = lptr;
            // Remain_ib[RemainBlk] = ib;
            Remain_info[RemainBlk].ib = ib;
            RemainBlk++;
        }

        cum_nrow +=temp_nbrow;

        lptr += LB_DESCRIPTOR;  /* Skip descriptor. */
        lptr += temp_nbrow;
        luptr += temp_nbrow;
    }  /* for i ... */

    lptr = lptr0;
    luptr = luptr0;

    /* leading dimension of L buffer */
#if 0
    int LDlookAhead_LBuff = lookAheadFullRow[lookAheadBlk-1]; /* may go negative.*/
#else
    /* Piyush fix: guard the empty look-ahead case (index -1 above). */
    int LDlookAhead_LBuff = lookAheadBlk==0? 0 :lookAheadFullRow[lookAheadBlk-1];
#endif

    /* Pack the look-ahead L blocks column-by-column into the contiguous
     * lookAhead_L_buff so the later GEMM sees a dense matrix. */
    /* #pragma omp parallel for */
    for (int i = 0; i < lookAheadBlk; ++i) {
        int StRowDest = 0;
        int temp_nbrow;
        if (i==0) {
            temp_nbrow = lookAheadFullRow[0];
        } else {
            StRowDest = lookAheadFullRow[i-1];
            temp_nbrow = lookAheadFullRow[i]-lookAheadFullRow[i-1];
        }

        int StRowSource=lookAheadStRow[i];

        /* Now copying the matrix*/
        // #pragma omp parallel for (gives slow down)
        for (int j = 0; j < knsupc; ++j) {
            memcpy(&lookAhead_L_buff[StRowDest+j*LDlookAhead_LBuff],
                   &lusup[luptr+j*nsupr+StRowSource],
                   temp_nbrow * sizeof(doublecomplex) );
        }
    }

    int LDRemain_LBuff = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;

    /* Same packing for the remaining (non-look-ahead) L blocks. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < RemainBlk; ++i) {
        int StRowDest = 0;
        int temp_nbrow;
        if (i==0) {
            temp_nbrow = Remain_info[0].FullRow;
        } else {
            StRowDest = Remain_info[i-1].FullRow;
            temp_nbrow = Remain_info[i].FullRow-Remain_info[i-1].FullRow;
        }

        int StRowSource=RemainStRow[i];

        /* Now copying the matrix*/
        // #pragma omp parallel for (gives slow down)
        for (int j = 0; j < knsupc; ++j) {
            // printf("StRowDest %d LDRemain_LBuff %d StRowSource %d \n", StRowDest ,LDRemain_LBuff ,StRowSource );
            memcpy(&Remain_L_buff[StRowDest+j*LDRemain_LBuff],
                   &lusup[luptr+j*nsupr+StRowSource],
                   temp_nbrow * sizeof(doublecomplex) );
        }
    } /* parallel for i ... */

    tt_end = SuperLU_timer_();
    LookAheadRowSepTimer += tt_end-tt_start;

#if 0
    LookAheadRowSepMOP += 2*knsupc*(lookAheadFullRow[lookAheadBlk-1]+Remain_info[RemainBlk-1].FullRow );
#else
    int_t lnbrow, rnbrow;
    lnbrow = lookAheadBlk==0 ? 0 : lookAheadFullRow[lookAheadBlk-1];
    rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
    nbrow = lnbrow + rnbrow;  /* total rows of L(:,k) below the diagonal block */
    LookAheadRowSepMOP += 2*knsupc*(nbrow);
#endif

    ldu =0;
    full =1;

    /*updating lookahead rows */
    tt_start = SuperLU_timer_();
#if 0
    nbrow = lookAheadFullRow[lookAheadBlk-1]+Remain_info[RemainBlk-1].FullRow;
#endif

    if ( nbrow>0 ) {
        /*
         * Counting U blocks: record per-block offsets and the number of
         * nonzero columns; ldu becomes the max segment size (GEMM K dim).
         */
        ncols=0;
        ldu=0;
        full=1;
        int temp_ncols=0;

        for (j = jj0; j < nub; ++j) {
            temp_ncols=0;
            arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
                              iukp0,rukp0,usub,perm_u,xsup,grid );
            Ublock_info[j].iukp = iukp;
            Ublock_info[j].rukp = rukp;
            Ublock_info[j].jb = jb;

            /* Prepare to call GEMM. */
            jj = iukp;
            for (; jj < iukp+nsupc; ++jj) {
                segsize = klst - usub[jj];
                if ( segsize ) {
                    ++temp_ncols;
                    if ( segsize != ldu ) full = 0;  /* ragged columns */
                    if ( segsize > ldu ) ldu = segsize;
                }
            }
            Ublock_info[j].full_u_cols = temp_ncols;
            ncols += temp_ncols;
        }

        /* Now doing prefix sum on full_u_cols */
        for ( j = jj0+1; j < nub; ++j) {
            Ublock_info[j].full_u_cols += Ublock_info[j-1].full_u_cols;
        }

        tempu = bigU;

        /* Gather the U segments into bigU, top-padding short segments with
         * zeros so every column has leading dimension ldu. */
#ifdef _OPENMP
#pragma omp parallel for private(j,iukp,rukp,tempu, jb, nsupc,ljb,segsize,\
    lead_zero, jj, i) \
    default (shared) schedule(SCHEDULE_STRATEGY)
#endif
        for (j = jj0; j < nub; ++j) {
            if(j==jj0) tempu = bigU;
            else tempu = bigU + ldu*Ublock_info[j-1].full_u_cols;

            /* == processing each of the remaining columns == */
            arrive_at_ublock(j,&iukp,&rukp,&jb,&ljb,&nsupc,
                             iukp0,rukp0,usub,perm_u,xsup,grid);

            for (jj = iukp; jj < iukp+nsupc; ++jj) {
                segsize = klst - usub[jj];
                if ( segsize ) {
                    lead_zero = ldu - segsize;
                    for (i = 0; i < lead_zero; ++i) tempu[i] = zero;
                    tempu += lead_zero;
                    for (i = 0; i < segsize; ++i) tempu[i] = uval[rukp+i];
                    rukp += segsize;
                    tempu += segsize;
                }
            }

            rukp -= usub[iukp - 1]; /* Return to start of U(k,j). */
        } /* parallel for j:jjj_st..jjj */

        tempu = bigU; //setting it to starting of the matrix
    } /* if(nbrow>0) */

    tt_end = SuperLU_timer_();
    GatherTimer += tt_end-tt_start;
    GatherMOP += 2*ldu*ncols;

    int Lnbrow = lookAheadBlk==0 ? 0 :lookAheadFullRow[lookAheadBlk-1];
    int Rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
    int jj_cpu=nub; /*limit between CPU and GPU */
    tempv = bigV;

    /* GEMM + scatter for the look-ahead rows:
     * tempv1 = lookAhead_L_buff * bigU, then scattered into L or U. */
    if (Lnbrow>0 && ldu >0 && ncols>0) {
        ncols = Ublock_info[nub-1].full_u_cols;
        schur_flop_counter += 2 * (double)Lnbrow * (double)ldu * (double)ncols;
        stat->ops[FACT] += 2 * (double)Lnbrow * (double)ldu * (double)ncols;
        tt_start = SuperLU_timer_();

        /* Flattened 2D loop: each ij pairs one U block (j) with one
         * look-ahead L block (lb); iterations are independent. */
#ifdef _OPENMP
#pragma omp parallel for default (shared) \
    private (j,i,lb,rukp,iukp,jb,nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \
    schedule(dynamic)
#endif
        for (int ij = 0; ij < lookAheadBlk*(nub-jj0); ++ij) {
            int j = ij/lookAheadBlk + jj0;
            int lb = ij%lookAheadBlk;

#ifdef _OPENMP
            int thread_id = omp_get_thread_num();
#else
            int thread_id = 0;
#endif
            /* per-thread scratch: indirection arrays and a GEMM tile */
            int* indirect_thread = indirect + ldt*thread_id;
            int* indirect2_thread = indirect2 + ldt*thread_id;
            doublecomplex* tempv1 = bigV + thread_id*ldt*ldt;

            /* Getting U block information */
            /* unsigned long long ut_start, ut_end; */
            int_t rukp = Ublock_info[j].rukp;
            int_t iukp = Ublock_info[j].iukp;
            int jb = Ublock_info[j].jb;
            int nsupc = SuperSize(jb);
            int ljb = LBj (jb, grid);
            int st_col;
            int ncols;
            if (j>jj0) {
                ncols = Ublock_info[j].full_u_cols-Ublock_info[j-1].full_u_cols;
                st_col = Ublock_info[j-1].full_u_cols;
            } else {
                ncols = Ublock_info[j].full_u_cols;
                st_col = 0;
            }

            /* Getting L block information */
            int_t lptr = lookAhead_lptr[lb];
            int ib = lookAhead_ib[lb];
            int temp_nbrow = lsub[lptr+1];
            lptr += LB_DESCRIPTOR;
            int cum_nrow = (lb==0 ? 0 : lookAheadFullRow[lb-1]);

#if defined (USE_VENDOR_BLAS)
            zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
                   &lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow,
                   &tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow, 1, 1);
#else
            zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
                   &lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow,
                   &tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow);
#endif

            /* Row block above the diagonal of column jb goes into U,
             * otherwise into L. */
            if ( ib < jb ) {
                zscatter_u (
                    ib, jb,
                    nsupc, iukp,xsup,
                    klst, temp_nbrow,
                    lptr, temp_nbrow,lsub,
                    usub, tempv1,
                    Ufstnz_br_ptr,
                    Unzval_br_ptr,
                    grid
                );
            } else {
                zscatter_l (
                    ib, ljb, nsupc,iukp,xsup,klst,temp_nbrow,lptr,
                    temp_nbrow,usub,lsub,tempv1,
                    indirect_thread, indirect2_thread,
                    Lrowind_bc_ptr,Lnzval_bc_ptr,grid
                );
            }
        } /* for ij = ... */

        tt_end = SuperLU_timer_();
        LookAheadGEMMTimer += tt_end- tt_start;
        LookAheadGEMMFlOp += 2 * (double ) Lnbrow * (double )ldu * (double )ncols;
        stat->ops[FACT] += 2 * (double ) Lnbrow * (double )ldu * (double )ncols;
        /* NOTE(review): GEMM and scatter share one timed interval here, so
         * the same tt span is added to both timers -- confirm intended. */
        LookAheadScatterTimer += tt_end-tt_start;
        LookAheadScatterMOP += 3*Lnbrow*ncols;
    } /* if Lnbrow < ... */

    /***************************************************************
     * Updating remaining rows and columns on CPU.
     * Same GEMM + scatter structure as above, but over Remain_info
     * blocks and the Remain_L_buff panel.
     ***************************************************************/
    Rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
    ncols = jj_cpu==0 ? 0 : Ublock_info[jj_cpu-1].full_u_cols;

    schur_flop_counter += 2 * (double)Rnbrow * (double)ldu * (double)ncols;
    stat->ops[FACT] += 2 * (double)Rnbrow * (double)ldu * (double)ncols;

    tt_start = SuperLU_timer_();

#ifdef _OPENMP
#pragma omp parallel for default (shared) \
    private (j,i,lb,rukp,iukp,jb,nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \
    schedule(dynamic)
#endif
    for (int ij = 0; ij < RemainBlk*(jj_cpu-jj0); ++ij) {
        int j = ij / RemainBlk + jj0;
        int lb = ij % RemainBlk;

#ifdef _OPENMP
        int thread_id = omp_get_thread_num();
#else
        int thread_id = 0;
#endif
        int* indirect_thread = indirect + ldt*thread_id;
        int* indirect2_thread = indirect2 + ldt*thread_id;
        doublecomplex* tempv1 = bigV + thread_id*ldt*ldt;

        /* Getting U block information */
        /* unsigned long long ut_start, ut_end; */
        int_t rukp = Ublock_info[j].rukp;
        int_t iukp = Ublock_info[j].iukp;
        int jb = Ublock_info[j].jb;
        int nsupc = SuperSize(jb);
        int ljb = LBj (jb, grid);
        int st_col;
        int ncols;
        if (j>jj0) {
            ncols = Ublock_info[j].full_u_cols-Ublock_info[j-1].full_u_cols;
            st_col = Ublock_info[j-1].full_u_cols;
        } else {
            ncols = Ublock_info[j].full_u_cols;
            st_col = 0;
        }

        /* Getting L block information */
        int_t lptr = Remain_info[lb].lptr;
        int ib = Remain_info[lb].ib;
        int temp_nbrow = lsub[lptr+1];
        lptr += LB_DESCRIPTOR;
        int cum_nrow = (lb==0 ? 0 : Remain_info[lb-1].FullRow);

        /* calling GEMM */
#if defined (USE_VENDOR_BLAS)
        zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
               &Remain_L_buff[(knsupc-ldu)*Rnbrow+cum_nrow], &Rnbrow,
               &tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow, 1, 1);
#else
        zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
               &Remain_L_buff[(knsupc-ldu)*Rnbrow+cum_nrow], &Rnbrow,
               &tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow);
#endif

        /* Now scattering the block */
        if ( ib<jb ) {
            zscatter_u (
                ib, jb,
                nsupc, iukp,xsup,
                klst, temp_nbrow,
                lptr, temp_nbrow,lsub,
                usub, tempv1,
                Ufstnz_br_ptr,
                Unzval_br_ptr,
                grid
            );
        } else {
            zscatter_l (
                ib, ljb, nsupc,iukp,xsup,klst,temp_nbrow,lptr,
                temp_nbrow,usub,lsub,tempv1,
                indirect_thread, indirect2_thread,
                Lrowind_bc_ptr,Lnzval_bc_ptr,grid
            );
        }
    } /* for (int ij =... */
} /* if k L(:,k) and U(k,:) are not empty */
GB_unop__round_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__round_fc64_fc64)
// op(A') function:  GB (_unop_tran__round_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cround (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cround (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cround (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ROUND || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = GB_cround(aij) elementwise over anz entries, in parallel.
// Two data layouts are handled: full (Ab == NULL, every entry present) and
// bitmap (Ab[p] != 0 marks a present entry; absent entries are skipped).

GrB_Info GB (_unop_apply__round_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // full case: every entry of A is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cround (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cround (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the GB_unop_transpose.c template, which is
// specialized by the GB_* macros defined above.

GrB_Info GB (_unop_tran__round_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
philosophen.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> // number of philosophers #define N 5 // left neighbour #define LEFT (id) // right neighbour #define RIGHT ((id + 1) % num_threads) #define TRUE 1 #define FALSE 0 // Global variables int num_threads; omp_lock_t forks[N]; void think(int philosopher) { printf("%d is thinking.\n", philosopher); } void eat(int philosopher) { printf("%d is eating.\n", philosopher); } void philosopher(int id) { while(TRUE) { think(id); // get forks //TODO let each philosopher eat with tow forks, the left and right one! //omp_set_lock(&forks[LEFT]); //omp_set_lock(&forks[RIGHT]); eat(id); // put forks //omp_unset_lock(&forks[LEFT]); //omp_unset_lock(&forks[RIGHT]); } } int main (int argc, char *argv[]) { int i; int id; for (i = 0; i < N; i++){ omp_init_lock(&forks[i]); } omp_set_num_threads(N); #pragma omp parallel private(id) shared(num_threads, forks) { id = omp_get_thread_num(); num_threads = omp_get_num_threads(); philosopher(id); } for (i = 0; i < N; i++){ omp_destroy_lock(&forks[i]); } return 0; }
ast-dump-openmp-atomic.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(int i) { #pragma omp atomic ++i; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-atomic.c:3:1, line:6:1> line:3:6 test 'void (int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used i 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:6:1> // CHECK-NEXT: `-OMPAtomicDirective {{.*}} <line:4:1, col:19> // CHECK-NEXT: `-UnaryOperator {{.*}} <line:5:3, col:5> 'int' prefix '++' // CHECK-NEXT: `-DeclRefExpr {{.*}} <col:5> 'int' lvalue ParmVar {{.*}} 'i' 'int'
GB_binop__rdiv_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__rdiv_int32
// A.*B function (eWiseMult):       GB_AemultB__rdiv_int32
// A*D function (colscale):         GB_AxD__rdiv_int32
// D*A function (rowscale):         GB_DxB__rdiv_int32
// C+=B function (dense accum):     GB_Cdense_accumB__rdiv_int32
// C+=b function (dense accum):     GB_Cdense_accumb__rdiv_int32
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__rdiv_int32
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__rdiv_int32
// C=scalar+B                       GB_bind1st__rdiv_int32
// C=scalar+B'                      GB_bind1st_tran__rdiv_int32
// C=A+scalar                       GB_bind2nd__rdiv_int32
// C=A'+scalar                      GB_bind2nd_tran__rdiv_int32

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 32)
// (rdiv is "reverse divide": the operands are swapped relative to div)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_IDIV_SIGNED (y, x, 32) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_INT32 || GxB_NO_RDIV_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB_Cdense_ewise3_accum__rdiv_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__rdiv_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__rdiv_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__rdiv_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE (generated code): unreachable duplicate return kept verbatim
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__rdiv_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__rdiv_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__rdiv_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__rdiv_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__rdiv_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t  x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t bij = Bx [p] ;
        Cx [p] = GB_IDIV_SIGNED (bij, x, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__rdiv_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t  y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        Cx [p] = GB_IDIV_SIGNED (y, aij, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_SIGNED (aij, x, 32) ; \
}

GrB_Info GB_bind1st_tran__rdiv_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_SIGNED (y, aij, 32) ; \
}

GrB_Info GB_bind2nd_tran__rdiv_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
CALPHADTieLineConcSolverTernary.h
#ifndef included_CALPHADTieLineConcSolverTernary
#define included_CALPHADTieLineConcSolverTernary

#include "NewtonSolver.h"
#include "datatypes.h"

namespace Thermo4PFM
{

/// solve for equilibrium compositions along a tie line
/// passing through nominal composition
///
/// Uses the CRTP NewtonSolver base: this class supplies RHS() and
/// Jacobian() for the 5-unknown system (4 compositions + phase fraction).
class CALPHADTieLineConcSolverTernary
    : public NewtonSolver<5, CALPHADTieLineConcSolverTernary, JacobianDataType>
{
public:
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
    /// input x: initial values for cL_0, cL_1, cS_0, cS_1
    /// and phase fraction
    /// output x: ceqL_0, ceqL_1, ceqS_0, ceqS_1, and phi
    ///
    /// returns the status of NewtonSolver::ComputeSolution
    /// (alpha is the Newton damping factor, default 1 = full step)
    int ComputeConcentration(double* const x, const double tol,
        const int max_iters, const double alpha = 1.)
    {
        return NewtonSolver::ComputeSolution(x, tol, max_iters, alpha);
    }

    /// setup model parameter values to be used by solver,
    /// at a given temperature, including nominal composition
    /// c0, c1
    void setup(const double c0, const double c1, const double RTinv,
        const CalphadDataType* const L_AB_L,
        const CalphadDataType* const L_AC_L,
        const CalphadDataType* const L_BC_L,
        const CalphadDataType* const L_AB_S,
        const CalphadDataType* const L_AC_S,
        const CalphadDataType* const L_BC_S,
        const CalphadDataType* const L_ABC_L,
        const CalphadDataType* const L_ABC_S,
        const CalphadDataType* const fA,
        const CalphadDataType* const fB,
        const CalphadDataType* const fC);

    /// evaluate RHS of the system of equations to solve for
    /// specific to this solver
    void RHS(const double* const x, double* const fvec);

    /// evaluate Jacobian of system of equations
    /// specific to this solver
    void Jacobian(const double* const x, JacobianDataType** const fjac);
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif

private:
    ///
    /// nominal composition (defining tie line)
    ///
    double conc_[2];

    // 1/(RT) as passed to setup(); RT_ presumably its reciprocal --
    // TODO confirm against setup() implementation (not visible here)
    double RTinv_;
    double RT_;

    ///
    /// L coefficients for 2 possible phases (L and S)
    ///
    CalphadDataType L_AB_L_[4];
    CalphadDataType L_AC_L_[4];
    CalphadDataType L_BC_L_[4];
    CalphadDataType L_ABC_L_[3];

    CalphadDataType L_AB_S_[4];
    CalphadDataType L_AC_S_[4];
    CalphadDataType L_BC_S_[4];
    CalphadDataType L_ABC_S_[3];

    ///
    /// energies of 3 species, in two phase each
    ///
    CalphadDataType fA_[2];
    CalphadDataType fB_[2];
    CalphadDataType fC_[2];
};
}

#endif
1body.h
/* * Copyright (C) 2004-2020 Edward F. Valeev * * This file is part of Libint. * * Libint is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Libint is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with Libint. If not, see <http://www.gnu.org/licenses/>. * */ // standard C++ headers #include <atomic> #include <chrono> #include <cmath> #include <fstream> #include <iomanip> #include <iostream> #include <iterator> #include <memory> #include <mutex> #include <sstream> #include <thread> #include <unordered_map> #include <vector> // have BTAS library? 
#ifdef LIBINT2_HAVE_BTAS # include <btas/btas.h> #else // LIBINT2_HAVE_BTAS # error "libint2::lcao requires BTAS" #endif // Libint Gaussian integrals library #include <libint2/diis.h> #include <libint2/util/intpart_iter.h> #include <libint2/chemistry/sto3g_atomic_density.h> #include <libint2.hpp> #if defined(_OPENMP) #include <omp.h> #endif typedef btas::RangeNd<CblasRowMajor, std::array<long, 2>> Range2; typedef btas::RangeNd<CblasRowMajor, std::array<long, 3>> Range3; typedef btas::RangeNd<CblasRowMajor, std::array<long, 4>> Range4; typedef btas::Tensor<double, Range2> Tensor2d; typedef btas::Tensor<double, Range3> Tensor3d; typedef btas::Tensor<double, Range3> Tensor4d; typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> Matrix; // import dense, dynamically sized Matrix type from Eigen; // this is a matrix with row-major storage // (http://en.wikipedia.org/wiki/Row-major_order) // to meet the layout of the integrals returned by the Libint integral library typedef Eigen::DiagonalMatrix<double, Eigen::Dynamic, Eigen::Dynamic> DiagonalMatrix; using libint2::Shell; using libint2::libint2::Atom; using libint2::BasisSet; using libint2::Operator; using libint2::BraKet; std::vector<libint2::Atom> read_geometry(const std::string& filename); Matrix compute_soad(const std::vector<libint2::Atom>& atoms); // computes norm of shell-blocks of A Matrix compute_shellblock_norm(const BasisSet& obs, const Matrix& A); template <Operator obtype> std::array<Matrix, libint2::operator_traits<obtype>::nopers> compute_1body_ints( const BasisSet& obs, const std::vector<libint2::Atom>& atoms = std::vector<libint2::Atom>()); #if LIBINT2_DERIV_ONEBODY_ORDER template <Operator obtype> std::vector<Matrix> compute_1body_ints_deriv(unsigned deriv_order, const BasisSet& obs, const std::vector<libint2::Atom>& atoms); #endif // LIBINT2_DERIV_ONEBODY_ORDER template <libint2::Operator Kernel = libint2::Operator::coulomb> Matrix compute_schwarz_ints( const BasisSet& bs1, 
const BasisSet& bs2 = BasisSet(), bool use_2norm = false, // use infty norm by default typename libint2::operator_traits<Kernel>::oper_params_type params = libint2::operator_traits<Kernel>::default_params()); Matrix compute_do_ints(const BasisSet& bs1, const BasisSet& bs2 = BasisSet(), bool use_2norm = false // use infty norm by default ); using shellpair_list_t = std::unordered_map<size_t, std::vector<size_t>>; shellpair_list_t obs_shellpair_list; // shellpair list for OBS /// computes non-negligible shell pair list; shells \c i and \c j form a /// non-negligible /// pair if they share a center or the Frobenius norm of their overlap is /// greater than threshold shellpair_list_t compute_shellpair_list(const BasisSet& bs1, const BasisSet& bs2 = BasisSet(), double threshold = 1e-12); Matrix compute_2body_fock( const BasisSet& obs, const Matrix& D, double precision = std::numeric_limits< double>::epsilon(), // discard contributions smaller than this const Matrix& Schwarz = Matrix() // K_ij = sqrt(||(ij|ij)||_\infty); if // empty, do not Schwarz screen ); // an Fock builder that can accept densities expressed a separate basis Matrix compute_2body_fock_general( const BasisSet& obs, const Matrix& D, const BasisSet& D_bs, bool D_is_sheldiagonal = false, // set D_is_shelldiagonal if doing SOAD double precision = std::numeric_limits< double>::epsilon() // discard contributions smaller than this ); #if LIBINT2_DERIV_ERI_ORDER template <unsigned deriv_order> std::vector<Matrix> compute_2body_fock_deriv( const BasisSet& obs, const std::vector<libint2::Atom>& atoms, const Matrix& D, double precision = std::numeric_limits< double>::epsilon(), // discard contributions smaller than this const Matrix& Schwarz = Matrix() // K_ij = sqrt(||(ij|ij)||_\infty); if // empty, do not Schwarz screen ); #endif // LIBINT2_DERIV_ERI_ORDER // returns {X,X^{-1},S_condition_number_after_conditioning}, where // X is the generalized square-root-inverse such that X.transpose() * S * X = I // columns 
of Xinv is the basis conditioned such that // the condition number of its metric (Xinv.transpose . Xinv) < // S_condition_number_threshold std::tuple<Matrix, Matrix, double> conditioning_orthogonalizer( const Matrix& S, double S_condition_number_threshold); #ifdef LIBINT2_HAVE_BTAS #define HAVE_DENSITY_FITTING 1 struct DFFockEngine { const BasisSet& obs; const BasisSet& dfbs; DFFockEngine(const BasisSet& _obs, const BasisSet& _dfbs) : obs(_obs), dfbs(_dfbs) {} typedef btas::RangeNd<CblasRowMajor, std::array<long, 3>> Range3d; typedef btas::Tensor<double, Range3d> Tensor3d; Tensor3d xyK; // a DF-based builder, using coefficients of occupied MOs Matrix compute_2body_fock_dfC(const Matrix& Cocc); }; #endif // HAVE_DENSITY_FITTING namespace libint2 { int nthreads; /// fires off \c nthreads instances of lambda in parallel template <typename Lambda> void parallel_do(Lambda& lambda) { #ifdef _OPENMP #pragma omp parallel { auto thread_id = omp_get_thread_num(); lambda(thread_id); } #else // use C++11 threads std::vector<std::thread> threads; for (int thread_id = 0; thread_id != libint2::nthreads; ++thread_id) { if (thread_id != nthreads - 1) threads.push_back(std::thread(lambda, thread_id)); else lambda(thread_id); } // threads_id for (int thread_id = 0; thread_id < nthreads - 1; ++thread_id) threads[thread_id].join(); #endif } } int main(int argc, char* argv[]) { using std::cout; using std::cerr; using std::endl; try { /*** =========================== ***/ /*** initialize molecule ***/ /*** =========================== ***/ // read geometry from a file; by default read from h2o.xyz, else take // filename (.xyz) from the command line const auto filename = (argc > 1) ? argv[1] : "h2o.xyz"; const auto basisname = (argc > 2) ? argv[2] : "aug-cc-pVDZ"; bool do_density_fitting = false; #ifdef HAVE_DENSITY_FITTING do_density_fitting = (argc > 3); const auto dfbasisname = do_density_fitting ? 
argv[3] : ""; #endif std::vector<libint2::Atom> atoms = read_geometry(filename); // set up thread pool { using libint2::nthreads; auto nthreads_cstr = getenv("LIBINT_NUM_THREADS"); nthreads = 1; if (nthreads_cstr && strcmp(nthreads_cstr, "")) { std::istringstream iss(nthreads_cstr); iss >> nthreads; if (nthreads > 1 << 16 || nthreads <= 0) nthreads = 1; } #if defined(_OPENMP) omp_set_num_threads(nthreads); #endif std::cout << "Will scale over " << nthreads #if defined(_OPENMP) << " OpenMP" #else << " C++11" #endif << " threads" << std::endl; } // count the number of electrons auto nelectron = 0; for (auto i = 0; i < atoms.size(); ++i) nelectron += atoms[i].atomic_number; const auto ndocc = nelectron / 2; cout << "# of electrons = " << nelectron << endl; // compute the nuclear repulsion energy auto enuc = 0.0; for (auto i = 0; i < atoms.size(); i++) for (auto j = i + 1; j < atoms.size(); j++) { auto xij = atoms[i].x - atoms[j].x; auto yij = atoms[i].y - atoms[j].y; auto zij = atoms[i].z - atoms[j].z; auto r2 = xij * xij + yij * yij + zij * zij; auto r = sqrt(r2); enuc += atoms[i].atomic_number * atoms[j].atomic_number / r; } cout << "Nuclear repulsion energy = " << std::setprecision(15) << enuc << endl; libint2::Shell::do_enforce_unit_normalization(false); cout << "libint2::Atomic Cartesian coordinates (a.u.):" << endl; for (const auto& a : atoms) std::cout << a.atomic_number << " " << a.x << " " << a.y << " " << a.z << std::endl; BasisSet obs(basisname, atoms); cout << "orbital basis set rank = " << obs.nbf() << endl; #ifdef HAVE_DENSITY_FITTING BasisSet dfbs; if (do_density_fitting) { dfbs = BasisSet(dfbasisname, atoms); cout << "density-fitting basis set rank = " << dfbs.nbf() << endl; } #endif // HAVE_DENSITY_FITTING /*** =========================== ***/ /*** compute 1-e integrals ***/ /*** =========================== ***/ // initializes the Libint integrals library ... 
now ready to compute libint2::initialize(); // compute OBS non-negligible shell-pair list { obs_shellpair_list = compute_shellpair_list(obs); size_t nsp = 0; for (auto& sp : obs_shellpair_list) { nsp += sp.second.size(); } std::cout << "# of {all,non-negligible} shell-pairs = {" << obs.size() * (obs.size() + 1) / 2 << "," << nsp << "}" << std::endl; } // compute one-body integrals auto S = compute_1body_ints<Operator::overlap>(obs)[0]; auto T = compute_1body_ints<Operator::kinetic>(obs)[0]; auto V = compute_1body_ints<Operator::nuclear>(obs, libint2::make_point_charges(atoms))[0]; Matrix H = T + V; T.resize(0, 0); V.resize(0, 0); // compute orthogonalizer X such that X.transpose() . S . X = I Matrix X, Xinv; double XtX_condition_number; // condition number of "re-conditioned" // overlap obtained as Xinv.transpose() . Xinv // one should think of columns of Xinv as the conditioned basis // Re: name ... cond # (Xinv.transpose() . Xinv) = cond # (X.transpose() . // X) // by default assume can manage to compute with condition number of S <= // 1/eps // this is probably too optimistic, but in well-behaved cases even 10^11 is // OK double S_condition_number_threshold = 1.0 / std::numeric_limits<double>::epsilon(); std::tie(X, Xinv, XtX_condition_number) = conditioning_orthogonalizer(S, S_condition_number_threshold); Matrix D; Matrix C_occ; Matrix evals; { // use SOAD as the guess density const auto tstart = std::chrono::high_resolution_clock::now(); auto D_minbs = compute_soad(atoms); // compute guess in minimal basis BasisSet minbs("STO-3G", atoms); if (minbs == obs) D = D_minbs; else { // if basis != minimal basis, map non-representable SOAD guess // into the AO basis // by diagonalizing a Fock matrix std::cout << "projecting SOAD into AO basis ... 
"; auto F = H; F += compute_2body_fock_general( obs, D_minbs, minbs, true /* SOAD_D_is_shelldiagonal */, std::numeric_limits<double>::epsilon() // this is cheap, no reason // to be cheaper ); // solve F C = e S C by (conditioned) transformation to F' C' = e C', // where // F' = X.transpose() . F . X; the original C is obtained as C = X . C' Eigen::SelfAdjointEigenSolver<Matrix> eig_solver(X.transpose() * F * X); auto C = X * eig_solver.eigenvectors(); // compute density, D = C(occ) . C(occ)T C_occ = C.leftCols(ndocc); D = C_occ * C_occ.transpose(); const auto tstop = std::chrono::high_resolution_clock::now(); const std::chrono::duration<double> time_elapsed = tstop - tstart; std::cout << "done (" << time_elapsed.count() << " s)" << std::endl; } } // pre-compute data for Schwarz bounds auto K = compute_schwarz_ints<>(obs, BasisSet(), true); // prepare for density fitting #ifdef HAVE_DENSITY_FITTING std::unique_ptr<DFFockEngine> dffockengine( do_density_fitting ? new DFFockEngine(obs, dfbs) : nullptr); #endif // HAVE_DENSITY_FITTING /*** =========================== ***/ /*** SCF loop ***/ /*** =========================== ***/ const auto maxiter = 100; const auto conv = 1e-12; auto iter = 0; auto rms_error = 1.0; auto ediff_rel = 0.0; auto ehf = 0.0; auto n2 = D.cols() * D.rows(); libint2::DIIS<Matrix> diis(2); // start DIIS on second iteration // prepare for incremental Fock build ... Matrix D_diff = D; Matrix F = H; bool reset_incremental_fock_formation = false; bool incremental_Fbuild_started = false; double start_incremental_F_threshold = 1e-5; double next_reset_threshold = 0.0; size_t last_reset_iteration = 0; // ... 
unless doing DF, then use MO coefficients, hence not "incremental" if (do_density_fitting) start_incremental_F_threshold = 0.0; do { const auto tstart = std::chrono::high_resolution_clock::now(); ++iter; // Last iteration's energy and density auto ehf_last = ehf; Matrix D_last = D; if (not incremental_Fbuild_started && rms_error < start_incremental_F_threshold) { incremental_Fbuild_started = true; reset_incremental_fock_formation = false; last_reset_iteration = iter - 1; next_reset_threshold = rms_error / 1e1; std::cout << "== started incremental fock build" << std::endl; } if (reset_incremental_fock_formation || not incremental_Fbuild_started) { F = H; D_diff = D; } if (reset_incremental_fock_formation && incremental_Fbuild_started) { reset_incremental_fock_formation = false; last_reset_iteration = iter; next_reset_threshold = rms_error / 1e1; std::cout << "== reset incremental fock build" << std::endl; } // build a new Fock matrix if (not do_density_fitting) { // totally empirical precision variation, involves the condition number const auto precision_F = std::min( std::min(1e-3 / XtX_condition_number, 1e-7), std::max(rms_error / 1e4, std::numeric_limits<double>::epsilon())); F += compute_2body_fock(obs, D_diff, precision_F, K); } #if HAVE_DENSITY_FITTING else { // do DF F = H + dffockengine->compute_2body_fock_dfC(C_occ); } #else else { assert(false); } // do_density_fitting is true but HAVE_DENSITY_FITTING is not defined! 
// should not happen #endif // HAVE_DENSITY_FITTING // compute HF energy with the non-extrapolated Fock matrix ehf = D.cwiseProduct(H + F).sum(); ediff_rel = std::abs((ehf - ehf_last) / ehf); // compute SCF error Matrix FD_comm = F * D * S - S * D * F; rms_error = FD_comm.norm() / n2; if (rms_error < next_reset_threshold || iter - last_reset_iteration >= 8) reset_incremental_fock_formation = true; // DIIS extrapolate F Matrix F_diis = F; // extrapolated F cannot be used in incremental Fock // build; only used to produce the density // make a copy of the unextrapolated matrix diis.extrapolate(F_diis, FD_comm); // solve F C = e S C by (conditioned) transformation to F' C' = e C', // where // F' = X.transpose() . F . X; the original C is obtained as C = X . C' Eigen::SelfAdjointEigenSolver<Matrix> eig_solver(X.transpose() * F_diis * X); evals = eig_solver.eigenvalues(); auto C = X * eig_solver.eigenvectors(); // compute density, D = C(occ) . C(occ)T C_occ = C.leftCols(ndocc); D = C_occ * C_occ.transpose(); D_diff = D - D_last; const auto tstop = std::chrono::high_resolution_clock::now(); const std::chrono::duration<double> time_elapsed = tstop - tstart; if (iter == 1) std::cout << "\n\nIter E(HF) D(E)/E " "RMS([F,D])/nn Time(s)\n"; printf(" %02d %20.12f %20.12e %20.12e %10.5lf\n", iter, ehf + enuc, ediff_rel, rms_error, time_elapsed.count()); } while (((ediff_rel > conv) || (rms_error > conv)) && (iter < maxiter)); printf("** Hartree-Fock energy = %20.12f\n", ehf + enuc); auto Mu = compute_1body_ints<Operator::emultipole2>(obs); std::array<double, 3> mu; std::array<double, 6> qu; for (int xyz = 0; xyz != 3; ++xyz) mu[xyz] = -2 * D.cwiseProduct(Mu[xyz + 1]) .sum(); // 2 = alpha + beta, -1 = electron charge for (int k = 0; k != 6; ++k) qu[k] = -2 * D.cwiseProduct(Mu[k + 4]) .sum(); // 2 = alpha + beta, -1 = electron charge std::cout << "** edipole = "; std::copy(mu.begin(), mu.end(), std::ostream_iterator<double>(std::cout, " ")); std::cout << std::endl; std::cout << 
"** equadrupole = "; std::copy(qu.begin(), qu.end(), std::ostream_iterator<double>(std::cout, " ")); std::cout << std::endl; { // compute force #if LIBINT2_DERIV_ONEBODY_ORDER // compute 1-e forces Matrix F1 = Matrix::Zero(atoms.size(), 3); Matrix F_Pulay = Matrix::Zero(atoms.size(), 3); ////////// // one-body contributions to the forces ////////// auto T1 = compute_1body_ints_deriv<Operator::kinetic>(1, obs, atoms); auto V1 = compute_1body_ints_deriv<Operator::nuclear>(1, obs, atoms); for (auto atom = 0, i = 0; atom != atoms.size(); ++atom) { for (auto xyz = 0; xyz != 3; ++xyz, ++i) { auto force = 2 * (T1[i] + V1[i]).cwiseProduct(D).sum(); F1(atom, xyz) += force; } } ////////// // Pulay force ////////// // orbital energy density DiagonalMatrix evals_occ(evals.topRows(ndocc)); Matrix W = C_occ * evals_occ * C_occ.transpose(); auto S1 = compute_1body_ints_deriv<Operator::overlap>(1, obs, atoms); for (auto atom = 0, i = 0; atom != atoms.size(); ++atom) { for (auto xyz = 0; xyz != 3; ++xyz, ++i) { auto force = 2 * S1[i].cwiseProduct(W).sum(); F_Pulay(atom, xyz) -= force; } } std::cout << "** 1-body forces = "; for (int atom = 0; atom != atoms.size(); ++atom) for (int xyz = 0; xyz != 3; ++xyz) std::cout << F1(atom, xyz) << " "; std::cout << std::endl; std::cout << "** Pulay forces = "; for (int atom = 0; atom != atoms.size(); ++atom) for (int xyz = 0; xyz != 3; ++xyz) std::cout << F_Pulay(atom, xyz) << " "; std::cout << std::endl; #endif // LIBINT2_DERIV_ONEBODY_ORDER #if LIBINT2_DERIV_ERI_ORDER // compute 2-e forces Matrix F2 = Matrix::Zero(atoms.size(), 3); ////////// // two-body contributions to the forces ////////// auto G1 = compute_2body_fock_deriv<1>(obs, atoms, D); for (auto atom = 0, i = 0; atom != atoms.size(); ++atom) { for (auto xyz = 0; xyz != 3; ++xyz, ++i) { // identity prefactor since E(HF) = trace(H + F, D) = trace(2H + G, D) auto force = G1[i].cwiseProduct(D).sum(); F2(atom, xyz) += force; } } std::cout << "** 2-body forces = "; for (int atom = 0; 
atom != atoms.size(); ++atom) for (int xyz = 0; xyz != 3; ++xyz) std::cout << F2(atom, xyz) << " "; std::cout << std::endl; #endif // if support 1-e and 2-e derivatives compute nuclear repulsion force and the // total force #if LIBINT2_DERIV_ONEBODY_ORDER && LIBINT2_DERIV_ERI_ORDER // compute nuclear repulsion forces Matrix FN = Matrix::Zero(atoms.size(), 3); ////////// // nuclear repulsion contribution to the forces ////////// for (auto a1 = 1; a1 != atoms.size(); ++a1) { const auto& atom1 = atoms[a1]; for (auto a2 = 0; a2 < a1; ++a2) { const auto& atom2 = atoms[a2]; auto x12 = atom1.x - atom2.x; auto y12 = atom1.y - atom2.y; auto z12 = atom1.z - atom2.z; auto r12_2 = x12 * x12 + y12 * y12 + z12 * z12; auto r12 = sqrt(r12_2); auto r12_3 = r12 * r12_2; auto z1z2_over_r12_3 = atom1.atomic_number * atom2.atomic_number / r12_3; auto fx = -x12 * z1z2_over_r12_3; auto fy = -y12 * z1z2_over_r12_3; auto fz = -z12 * z1z2_over_r12_3; FN(a1, 0) += fx; FN(a1, 1) += fy; FN(a1, 2) += fz; FN(a2, 0) -= fx; FN(a2, 1) -= fy; FN(a2, 2) -= fz; } } std::cout << "** nuclear repulsion forces = "; for (int atom = 0; atom != atoms.size(); ++atom) for (int xyz = 0; xyz != 3; ++xyz) std::cout << FN(atom, xyz) << " "; std::cout << std::endl; auto F = F1 + F_Pulay + F2 + FN; std::cout << "** Hartree-Fock forces = "; for (int atom = 0; atom != atoms.size(); ++atom) for (int xyz = 0; xyz != 3; ++xyz) std::cout << F(atom, xyz) << " "; std::cout << std::endl; #endif } { // compute hessian const auto ncoords = 3 * atoms.size(); // # of elems in upper triangle const auto nelem = ncoords * (ncoords+1) / 2; #if LIBINT2_DERIV_ONEBODY_ORDER > 1 // compute 1-e hessian Matrix H1 = Matrix::Zero(ncoords, ncoords); Matrix H_Pulay = Matrix::Zero(ncoords, ncoords); ////////// // one-body contributions to the hessian ////////// auto T2 = compute_1body_ints_deriv<Operator::kinetic>(2, obs, atoms); auto V2 = compute_1body_ints_deriv<Operator::nuclear>(2, obs, atoms); for (auto row = 0, i = 0; row != ncoords; 
++row) { for (auto col = row; col != ncoords; ++col, ++i) { auto hess = 2 * (T2[i] + V2[i]).cwiseProduct(D).sum(); H1(row, col) += hess; } } ////////// // Pulay hessian ////////// // orbital energy density DiagonalMatrix evals_occ(evals.topRows(ndocc)); Matrix W = C_occ * evals_occ * C_occ.transpose(); auto S2 = compute_1body_ints_deriv<Operator::overlap>(2, obs, atoms); for (auto row = 0, i = 0; row != ncoords; ++row) { for (auto col = row; col != ncoords; ++col, ++i) { auto hess = 2 * S2[i].cwiseProduct(W).sum(); H_Pulay(row, col) -= hess; } } std::cout << "** 1-body hessian = "; for (auto row = 0, i = 0; row != ncoords; ++row) { for (auto col = row; col != ncoords; ++col) { std::cout << H1(row, col) << " "; } } std::cout << std::endl; std::cout << "** Pulay hessian = "; for (auto row = 0, i = 0; row != ncoords; ++row) { for (auto col = row; col != ncoords; ++col) { std::cout << H_Pulay(row, col) << " "; } } std::cout << std::endl; #endif // LIBINT2_DERIV_ONEBODY_ORDER > 1 #if LIBINT2_DERIV_ERI_ORDER > 1 // compute 2-e forces Matrix H2 = Matrix::Zero(ncoords, ncoords); ////////// // two-body contributions to the forces ////////// auto G2 = compute_2body_fock_deriv<2>(obs, atoms, D); for (auto row = 0, i = 0; row != ncoords; ++row) { for (auto col = row; col != ncoords; ++col, ++i) { // identity prefactor since E(HF) = trace(H + F, D) = trace(2H + G, D) auto hess = G2[i].cwiseProduct(D).sum(); H2(row, col) += hess; } } std::cout << "** 2-body hessian = "; for (auto row = 0, i = 0; row != ncoords; ++row) { for (auto col = row; col != ncoords; ++col) { std::cout << H2(row, col) << " "; } } std::cout << std::endl; #endif // if support 1-e and 2-e 2nd derivatives compute nuclear repulsion hessian and // the total hessian #if LIBINT2_DERIV_ONEBODY_ORDER > 1 && LIBINT2_DERIV_ERI_ORDER > 1 // compute nuclear repulsion hessian // NB only the upper triangle is computed!!! 
Matrix HN = Matrix::Zero(ncoords, ncoords); ////////// // nuclear repulsion contribution to the hessian ////////// for (auto a1 = 1; a1 != atoms.size(); ++a1) { const auto& atom1 = atoms[a1]; for (auto a2 = 0; a2 < a1; ++a2) { const auto& atom2 = atoms[a2]; auto x12 = atom1.x - atom2.x; auto y12 = atom1.y - atom2.y; auto z12 = atom1.z - atom2.z; auto x12_2 = x12 * x12; auto y12_2 = y12 * y12; auto z12_2 = z12 * z12; auto r12_2 = x12 * x12 + y12 * y12 + z12 * z12; auto r12 = sqrt(r12_2); auto r12_5 = r12 * r12_2 * r12_2; auto z1z2_over_r12_5 = atom1.atomic_number * atom2.atomic_number / r12_5; HN(3*a1 + 0, 3*a1 + 0) += z1z2_over_r12_5 * (3*x12_2 - r12_2); HN(3*a1 + 1, 3*a1 + 1) += z1z2_over_r12_5 * (3*y12_2 - r12_2); HN(3*a1 + 2, 3*a1 + 2) += z1z2_over_r12_5 * (3*z12_2 - r12_2); HN(3*a1 + 0, 3*a1 + 1) += z1z2_over_r12_5 * (3*x12*y12); HN(3*a1 + 0, 3*a1 + 2) += z1z2_over_r12_5 * (3*x12*z12); HN(3*a1 + 1, 3*a1 + 2) += z1z2_over_r12_5 * (3*y12*z12); HN(3*a2 + 0, 3*a2 + 0) += z1z2_over_r12_5 * (3*x12_2 - r12_2); HN(3*a2 + 1, 3*a2 + 1) += z1z2_over_r12_5 * (3*y12_2 - r12_2); HN(3*a2 + 2, 3*a2 + 2) += z1z2_over_r12_5 * (3*z12_2 - r12_2); HN(3*a2 + 0, 3*a2 + 1) += z1z2_over_r12_5 * (3*x12*y12); HN(3*a2 + 0, 3*a2 + 2) += z1z2_over_r12_5 * (3*x12*z12); HN(3*a2 + 1, 3*a2 + 2) += z1z2_over_r12_5 * (3*y12*z12); HN(3*a2 + 0, 3*a1 + 0) -= z1z2_over_r12_5 * (3*x12_2 - r12_2); HN(3*a2 + 1, 3*a1 + 1) -= z1z2_over_r12_5 * (3*y12_2 - r12_2); HN(3*a2 + 2, 3*a1 + 2) -= z1z2_over_r12_5 * (3*z12_2 - r12_2); HN(3*a2 + 1, 3*a1 + 0) -= z1z2_over_r12_5 * (3*y12*x12); HN(3*a2 + 2, 3*a1 + 0) -= z1z2_over_r12_5 * (3*z12*x12); HN(3*a2 + 2, 3*a1 + 1) -= z1z2_over_r12_5 * (3*z12*y12); HN(3*a2 + 0, 3*a1 + 1) -= z1z2_over_r12_5 * (3*x12*y12); HN(3*a2 + 0, 3*a1 + 2) -= z1z2_over_r12_5 * (3*x12*z12); HN(3*a2 + 1, 3*a1 + 2) -= z1z2_over_r12_5 * (3*y12*z12); } } std::cout << "** nuclear repulsion hessian = "; for (auto row = 0, i = 0; row != ncoords; ++row) { for (auto col = row; col != ncoords; ++col) { 
std::cout << HN(row, col) << " "; } } std::cout << std::endl; auto H = H1 + H_Pulay + H2 + HN; std::cout << "** Hartree-Fock hessian = "; for (auto row = 0, i = 0; row != ncoords; ++row) { for (auto col = row; col != ncoords; ++col) { std::cout << H(row, col) << " "; } } std::cout << std::endl; #endif } libint2::finalize(); // done with libint } // end of try block; if any exceptions occurred, report them and exit // cleanly catch (const char* ex) { cerr << "caught exception: " << ex << endl; return 1; } catch (std::string& ex) { cerr << "caught exception: " << ex << endl; return 1; } catch (std::exception& ex) { cerr << ex.what() << endl; return 1; } catch (...) { cerr << "caught unknown exception\n"; return 1; } return 0; } std::vector<libint2::Atom> read_geometry(const std::string& filename) { std::cout << "Will read geometry from " << filename << std::endl; std::ifstream is(filename); if (not is.good()) { char errmsg[256] = "Could not open file "; strncpy(errmsg + 20, filename.c_str(), 235); errmsg[255] = '\0'; throw std::ios_base::failure(errmsg); } // to prepare for MPI parallelization, we will read the entire file into a // string that can be // broadcast to everyone, then converted to an std::istringstream object that // can be used just like std::ifstream std::ostringstream oss; oss << is.rdbuf(); // use ss.str() to get the entire contents of the file as an std::string // broadcast // then make an std::istringstream in each process std::istringstream iss(oss.str()); // check the extension: if .xyz, assume the standard XYZ format, otherwise // throw an exception if (filename.rfind(".xyz") != std::string::npos) return libint2::read_dotxyz(iss); else throw "only .xyz files are accepted"; } // computes Superposition-Of-libint2::Atomic-Densities guess for the molecular density // matrix // in minimal basis; occupies subshells by smearing electrons evenly over the // orbitals Matrix compute_soad(const std::vector<libint2::Atom>& atoms) { // compute number of 
atomic orbitals
  size_t nao = 0;
  for (const auto& atom : atoms) {
    const auto Z = atom.atomic_number;
    nao += libint2::sto3g_num_ao(Z);
  }

  // compute the minimal basis density
  Matrix D = Matrix::Zero(nao, nao);
  size_t ao_offset = 0;  // first AO of this atom
  for (const auto& atom : atoms) {
    const auto Z = atom.atomic_number;
    // smear the neutral atom's electrons evenly over its STO-3G subshells
    const auto& occvec = libint2::sto3g_ao_occupation_vector(Z);
    for(const auto& occ: occvec) {
      D(ao_offset, ao_offset) = occ;
      ++ao_offset;
    }
  }

  return D * 0.5;  // we use densities normalized to # of electrons/2
}

// Returns the matrix of shell-block norms of A, i.e.
// Ash(s1,s2) = || block of A spanned by shells s1,s2 ||_inf;
// used to screen shell quartets against the density magnitude.
Matrix compute_shellblock_norm(const BasisSet& obs, const Matrix& A) {
  const auto nsh = obs.size();
  Matrix Ash(nsh, nsh);

  auto shell2bf = obs.shell2bf();
  for (size_t s1 = 0; s1 != nsh; ++s1) {
    const auto& s1_first = shell2bf[s1];
    const auto& s1_size = obs[s1].size();
    for (size_t s2 = 0; s2 != nsh; ++s2) {
      const auto& s2_first = shell2bf[s2];
      const auto& s2_size = obs[s2].size();
      Ash(s1, s2) = A.block(s1_first, s2_first, s1_size, s2_size)
                        .lpNorm<Eigen::Infinity>();
    }
  }

  return Ash;
}

// Computes the matrices of a 1-body operator over basis obs; returns one
// Matrix per operator component (nopers of them, e.g. 3 for a dipole).
// Shell pairs are distributed over threads round-robin by pair index.
template <Operator obtype, typename OperatorParams>
std::array<Matrix, libint2::operator_traits<obtype>::nopers> compute_1body_ints(
    const BasisSet& obs, OperatorParams params) {
  const auto n = obs.nbf();
  const auto nshells = obs.size();
  using libint2::nthreads;
  typedef std::array<Matrix, libint2::operator_traits<obtype>::nopers>
      result_type;
  const unsigned int nopers = libint2::operator_traits<obtype>::nopers;
  result_type result;
  for (auto& r : result) r = Matrix::Zero(n, n);

  // construct the 1-body integrals engine
  std::vector<libint2::Engine> engines(nthreads);
  engines[0] = libint2::Engine(obtype, obs.max_nprim(), obs.max_l(), 0);
  // nuclear attraction ints engine needs to know where the charges sit ...
  // the nuclei are charges in this case; in QM/MM there will also be classical
  // charges
  if (obtype == Operator::nuclear || obtype == Operator::erf_nuclear ||
      obtype == Operator::erfc_nuclear) {
    engines[0].set_params(params);
  }
  for (size_t i = 1; i != nthreads; ++i) {
    engines[i] = engines[0];
  }

  auto shell2bf = obs.shell2bf();

  auto compute = [&](int thread_id) {
    const auto& buf = engines[thread_id].results();

    // loop over unique shell pairs, {s1,s2} such that s1 >= s2
    // this is due to the permutational symmetry of the real integrals over
    // Hermitian operators: (1|2) = (2|1)
    for (auto s1 = 0l, s12 = 0l; s1 != nshells; ++s1) {
      auto bf1 = shell2bf[s1];  // first basis function in this shell
      auto n1 = obs[s1].size();

      auto s1_offset = s1 * (s1+1) / 2;
      for(auto s2: obs_shellpair_list[s1]) {
        auto s12 = s1_offset + s2;
        // round-robin static partition of shell pairs over threads
        if (s12 % nthreads != thread_id) continue;

        auto bf2 = shell2bf[s2];
        auto n2 = obs[s2].size();

        auto n12 = n1 * n2;

        // compute shell pair; return is the pointer to the buffer
        engines[thread_id].compute(obs[s1], obs[s2]);

        for (unsigned int op = 0; op != nopers; ++op) {
          // "map" buffer to a const Eigen Matrix, and copy it to the
          // corresponding blocks of the result
          Eigen::Map<const Matrix> buf_mat(buf[op], n1, n2);
          result[op].block(bf1, bf2, n1, n2) = buf_mat;
          if (s1 != s2)  // if s1 >= s2, copy {s1,s2} to the corresponding
                         // {s2,s1} block, note the transpose!
            result[op].block(bf2, bf1, n2, n1) = buf_mat.transpose();
        }
      }
    }
  };  // compute lambda

  libint2::parallel_do(compute);

  return result;
}

#if LIBINT2_DERIV_ONEBODY_ORDER

// Computes geometrical derivatives (of order deriv_order) of 1-body operator
// matrices; result holds nopers matrices per geometrical-derivative index.
template <Operator obtype>
std::vector<Matrix> compute_1body_ints_deriv(unsigned deriv_order,
                                             const BasisSet& obs,
                                             const std::vector<libint2::Atom>& atoms) {
  using libint2::nthreads;
  const auto n = obs.nbf();
  const auto nshells = obs.size();
  constexpr auto nopers = libint2::operator_traits<obtype>::nopers;
  const auto nresults =
      nopers * libint2::num_geometrical_derivatives(atoms.size(), deriv_order);
  typedef std::vector<Matrix> result_type;
  result_type result(nresults);
  for (auto& r : result) r = Matrix::Zero(n, n);

  // construct the 1-body integrals engine
  std::vector<libint2::Engine> engines(nthreads);
  engines[0] = libint2::Engine(obtype, obs.max_nprim(), obs.max_l(),
                               deriv_order);
  // nuclear attraction ints engine needs to know where the charges sit ...
  // the nuclei are charges in this case; in QM/MM there will also be classical
  // charges
  if (obtype == Operator::nuclear) {
    std::vector<std::pair<double, std::array<double, 3>>> q;
    for (const auto& atom : atoms) {
      q.push_back({static_cast<double>(atom.atomic_number),
                   {{atom.x, atom.y, atom.z}}});
    }
    engines[0].set_params(q);
  }
  for (size_t i = 1; i != nthreads; ++i) {
    engines[i] = engines[0];
  }

  auto shell2bf = obs.shell2bf();
  auto shell2atom = obs.shell2atom(atoms);

  const auto natoms = atoms.size();
  const auto two_times_ncoords = 6*natoms;
  // # of derivative centers per shell set: the 2 bra/ket centers, plus every
  // nucleus when the operator itself depends on the nuclear positions
  const auto nderivcenters_shset =
      2 + ((obtype == Operator::nuclear) ?
natoms : 0);

  auto compute = [&](int thread_id) {
    const auto& buf = engines[thread_id].results();

    // loop over unique shell pairs, {s1,s2} such that s1 >= s2
    // this is due to the permutational symmetry of the real integrals over
    // Hermitian operators: (1|2) = (2|1)
    for (auto s1 = 0l, s12 = 0l; s1 != nshells; ++s1) {
      auto bf1 = shell2bf[s1];  // first basis function in this shell
      auto n1 = obs[s1].size();
      auto atom1 = shell2atom[s1];
      assert(atom1 != -1);

      auto s1_offset = s1 * (s1+1) / 2;
      for(auto s2: obs_shellpair_list[s1]) {
        auto s12 = s1_offset + s2;
        // round-robin static partition of shell pairs over threads
        if (s12 % nthreads != thread_id) continue;

        auto bf2 = shell2bf[s2];
        auto n2 = obs[s2].size();
        auto atom2 = shell2atom[s2];

        auto n12 = n1 * n2;

        // compute shell pair; return is the pointer to the buffer
        engines[thread_id].compute(obs[s1], obs[s2]);

        // "copy" lambda copies shell set \c idx to the operator matrix with
        // index \c op
        auto add_shellset_to_dest = [&](std::size_t op, std::size_t idx,
                                        double scale = 1.0) {
          // "map" buffer to a const Eigen Matrix, and copy it to the
          // corresponding blocks of the result
          Eigen::Map<const Matrix> buf_mat(buf[idx], n1, n2);
          if (scale == 1.0)
            result[op].block(bf1, bf2, n1, n2) += buf_mat;
          else
            result[op].block(bf1, bf2, n1, n2) += scale * buf_mat;
          if (s1 != s2) {  // if s1 >= s2, copy {s1,s2} to the corresponding
                           // {s2,s1} block, note the transpose!
            if (scale == 1.0)
              result[op].block(bf2, bf1, n2, n1) += buf_mat.transpose();
            else
              result[op].block(bf2, bf1, n2, n1) +=
                  scale * buf_mat.transpose();
          }
        };

        switch (deriv_order) {
          case 0:
            for (std::size_t op = 0; op != nopers; ++op) {
              add_shellset_to_dest(op, op);
            }
            break;

          // map deriv quanta for this shell pair to the overall deriv quanta
          //
          // easiest to explain with example:
          // in sto-3g water shells 0 1 2 sit on atom 0, shells 3 and 4 on atoms
          // 1 and 2 respectively
          // each call to engine::compute for nuclear ints will return
          // derivatives
          // with respect to 15 coordinates, obtained as 3 (x,y,z) times 2 + 3 =
          // 5 centers
          // (2 centers on which shells sit + 3 nuclear charges)
          // (for overlap, kinetic, and emultipole ints we there are only 6
          // coordinates
          // since the operator is coordinate-independent, or derivatives with
          // respect to
          // the operator coordinates are not computed)
          //
          case 1: {
            std::size_t shellset_idx = 0;
            for (auto c = 0; c != nderivcenters_shset; ++c) {
              // center 0 -> atom of shell s1, center 1 -> atom of shell s2,
              // remaining centers -> nuclei (nuclear operator only)
              auto atom = (c == 0) ? atom1 : ((c == 1) ? atom2 : c - 2);
              auto op_start = 3 * atom * nopers;
              auto op_fence = op_start + nopers;
              for (auto xyz = 0; xyz != 3;
                   ++xyz, op_start += nopers, op_fence += nopers) {
                for (unsigned int op = op_start; op != op_fence;
                     ++op, ++shellset_idx) {
                  add_shellset_to_dest(op, shellset_idx);
                }
              }
            }
          } break;

          case 2: {
            //
            // must pay attention to symmetry when computing 2nd and higher-order derivs
            // e.g. d2 (s1|s2) / dX dY involves several cases:
            // 1. only s1 (or only s2) depends on X AND Y (i.e. X and Y refer to same atom) =>
            //    d2 (s1|s2) / dX dY = (d2 s1 / dX dY | s2)
            // 2. s1 depends on X only, s2 depends on Y only (or vice versa) =>
            //    d2 (s1|s2) / dX dY = (d s1 / dX | d s2 / dY)
            // 3. s1 AND s2 depend on X AND Y (i.e. X and Y refer to same atom) =>
            //    case A: X != Y
            //    d2 (s1|s2) / dX dY = (d2 s1 / dX dY | s2) + (d s1 / dX | d s2 / dY)
            //    + (d s1 / dY | d s2 / dX) + (s1| d2 s2 / dX dY )
            //    case B: X == Y
            //    d2 (s1|s2) / dX2 = (d2 s1 / dX2 | s2) + 2 (d s1 / dX | d s2 / dX)
            //    + (s1| d2 s2 / dX2 )

            // computes upper triangle index
            // n2 = matrix size times 2
            // i,j = (unordered) indices
#define upper_triangle_index(n2, i, j) \
  (std::min((i), (j))) * ((n2) - (std::min((i), (j))) - 1) / 2 + \
  (std::max((i), (j)))

            // look over shellsets in the order in which they appear
            std::size_t shellset_idx = 0;
            for (auto c1 = 0; c1 != nderivcenters_shset; ++c1) {
              auto a1 = (c1 == 0) ? atom1 : ((c1 == 1) ? atom2 : c1 - 2);
              auto coord1 = 3 * a1;
              for (auto xyz1 = 0; xyz1 != 3; ++xyz1, ++coord1) {
                for (auto c2 = c1; c2 != nderivcenters_shset; ++c2) {
                  auto a2 = (c2 == 0) ? atom1 : ((c2 == 1) ? atom2 : c2 - 2);
                  auto xyz2_start = (c1 == c2) ? xyz1 : 0;
                  auto coord2 = 3 * a2 + xyz2_start;
                  for (auto xyz2 = xyz2_start; xyz2 != 3; ++xyz2, ++coord2) {
                    // same coordinate reached from two distinct centers:
                    // case B above, the cross term enters twice
                    double scale =
                        (coord1 == coord2 && c1 != c2) ? 2.0 : 1.0;
                    const auto coord12 =
                        upper_triangle_index(two_times_ncoords, coord1, coord2);
                    auto op_start = coord12 * nopers;
                    auto op_fence = op_start + nopers;
                    for (auto op = op_start; op != op_fence;
                         ++op, ++shellset_idx) {
                      add_shellset_to_dest(op, shellset_idx, scale);
                    }
                  }
                }
              }
            }
          } break;
#undef upper_triangle_index

          default: {
            assert(false && "not yet implemented");

            using ShellSetDerivIterator =
                libint2::FixedOrderedIntegerPartitionIterator<
                    std::vector<unsigned int>>;
            ShellSetDerivIterator shellset_diter(deriv_order,
                                                 nderivcenters_shset);
            while (shellset_diter) {
              const auto& deriv = *shellset_diter;
            }
          }
        }  // copy shell block switch
      }  // s2 <= s1
    }  // s1
  };  // compute lambda

  libint2::parallel_do(compute);
  return result;
}

#endif

// Computes the matrix of Schwarz-type bounds K(s1,s2) = sqrt(||(s1 s2|s1 s2)||)
// for all shell pairs; used to screen negligible shell quartets.
template <libint2::Operator Kernel>
Matrix compute_schwarz_ints(
    const BasisSet& bs1, const BasisSet& _bs2, bool use_2norm,
    typename libint2::operator_traits<Kernel>::oper_params_type params) {
  const BasisSet& bs2 = (_bs2.empty() ? bs1 : _bs2);
  const auto nsh1 = bs1.size();
  const auto nsh2 = bs2.size();
  const auto bs1_equiv_bs2 = (&bs1 == &bs2);

  Matrix K = Matrix::Zero(nsh1, nsh2);

  // construct the 2-electron repulsion integrals engine
  using libint2::Engine;
  using libint2::nthreads;
  std::vector<Engine> engines(nthreads);

  // !!! very important: cannot screen primitives in Schwarz computation !!!
  auto epsilon = 0.;
  engines[0] = Engine(Kernel, std::max(bs1.max_nprim(), bs2.max_nprim()),
                      std::max(bs1.max_l(), bs2.max_l()), 0, epsilon, params);
  for (size_t i = 1; i != nthreads; ++i) {
    engines[i] = engines[0];
  }

  std::cout << "computing Schwarz bound prerequisites (kernel=" << (int)Kernel
            << ") ... ";

  libint2::Timers<1> timer;
  timer.set_now_overhead(25);
  timer.start(0);

  auto compute = [&](int thread_id) {
    const auto& buf = engines[thread_id].results();

    // loop over permutationally-unique set of shells
    for (auto s1 = 0l, s12 = 0l; s1 != nsh1; ++s1) {
      auto n1 = bs1[s1].size();  // number of basis functions in this shell

      auto s2_max = bs1_equiv_bs2 ?
s1 : nsh2 - 1;

      for (auto s2 = 0; s2 <= s2_max; ++s2, ++s12) {
        if (s12 % nthreads != thread_id) continue;

        auto n2 = bs2[s2].size();
        auto n12 = n1 * n2;

        // compute the "diagonal" quartet (s1 s2|s1 s2)
        engines[thread_id].compute2<Kernel, BraKet::xx_xx, 0>(bs1[s1], bs2[s2],
                                                              bs1[s1], bs2[s2]);
        assert(buf[0] != nullptr &&
               "to compute Schwarz ints turn off primitive screening");

        // the diagonal elements are the Schwarz ints ... use Map.diagonal()
        Eigen::Map<const Matrix> buf_mat(buf[0], n12, n12);
        auto norm2 = use_2norm ? buf_mat.diagonal().norm()
                               : buf_mat.diagonal().lpNorm<Eigen::Infinity>();
        K(s1, s2) = std::sqrt(norm2);
        if (bs1_equiv_bs2) K(s2, s1) = K(s1, s2);
      }
    }
  };  // thread lambda

  libint2::parallel_do(compute);

  timer.stop(0);
  std::cout << "done (" << timer.read(0) << " s)" << std::endl;

  return K;
}

// Convenience wrapper: Schwarz-type bounds for the delta (overlap-like) kernel.
Matrix compute_do_ints(const BasisSet& bs1, const BasisSet& bs2,
                       bool use_2norm) {
  return compute_schwarz_ints<libint2::Operator::delta>(bs1, bs2, use_2norm);
}

// Builds, for each shell s1 of bs1, the list of shells s2 of bs2 whose overlap
// with s1 is non-negligible (norm >= threshold); pairs on the same center are
// always kept. Each per-shell list is sorted in increasing shell index.
shellpair_list_t compute_shellpair_list(const BasisSet& bs1,
                                        const BasisSet& _bs2,
                                        const double threshold) {
  const BasisSet& bs2 = (_bs2.empty() ? bs1 : _bs2);
  const auto nsh1 = bs1.size();
  const auto nsh2 = bs2.size();
  const auto bs1_equiv_bs2 = (&bs1 == &bs2);

  using libint2::nthreads;

  // construct the 2-electron repulsion integrals engine
  using libint2::Engine;
  std::vector<Engine> engines;
  engines.reserve(nthreads);
  engines.emplace_back(Operator::overlap,
                       std::max(bs1.max_nprim(), bs2.max_nprim()),
                       std::max(bs1.max_l(), bs2.max_l()), 0);
  for (size_t i = 1; i != nthreads; ++i) {
    engines.push_back(engines[0]);
  }

  std::cout << "computing non-negligible shell-pair list ... ";

  libint2::Timers<1> timer;
  timer.set_now_overhead(25);
  timer.start(0);

  shellpair_list_t result;

  // guards insertions into `result`, which is shared across threads
  std::mutex mx;

  auto compute = [&](int thread_id) {
    auto& engine = engines[thread_id];
    const auto& buf = engine.results();

    // loop over permutationally-unique set of shells
    for (auto s1 = 0l, s12 = 0l; s1 != nsh1; ++s1) {
      mx.lock();
      if (result.find(s1) == result.end())
        result.insert(std::make_pair(s1, std::vector<size_t>()));
      mx.unlock();

      auto n1 = bs1[s1].size();  // number of basis functions in this shell

      auto s2_max = bs1_equiv_bs2 ? s1 : nsh2 - 1;
      for (auto s2 = 0; s2 <= s2_max; ++s2, ++s12) {
        if (s12 % nthreads != thread_id) continue;

        auto on_same_center = (bs1[s1].O == bs2[s2].O);
        bool significant = on_same_center;
        if (not on_same_center) {
          auto n2 = bs2[s2].size();
          engines[thread_id].compute(bs1[s1], bs2[s2]);
          Eigen::Map<const Matrix> buf_mat(buf[0], n1, n2);
          auto norm = buf_mat.norm();
          significant = (norm >= threshold);
        }

        if (significant) {
          mx.lock();
          result[s1].emplace_back(s2);
          mx.unlock();
        }
      }
    }
  };  // end of compute

  libint2::parallel_do(compute);

  // resort shell list in increasing order, i.e. result[s][s1] < result[s][s2]
  // if s1 < s2
  auto sort = [&](int thread_id) {
    for (auto s1 = 0l; s1 != nsh1; ++s1) {
      if (s1 % nthreads == thread_id) {
        auto& list = result[s1];
        std::sort(list.begin(), list.end());
      }
    }
  };  // end of sort
  libint2::parallel_do(sort);

  timer.stop(0);
  std::cout << "done (" << timer.read(0) << " s)" << std::endl;

  return result;
}

// returns {X,X^{-1},rank,A_condition_number,result_A_condition_number}, where
// X is the generalized square-root-inverse such that X.transpose() * A * X = I
//
// if symmetric is true, produce "symmetric" sqrtinv: X = U . A_evals_sqrtinv .
// U.transpose()),
// else produce "canonical" sqrtinv: X = U . A_evals_sqrtinv
// where U are eigenvectors of A
// rows and cols of symmetric X are equivalent; for canonical X the rows are
// original basis (AO),
// cols are transformed basis ("orthogonal" AO)
//
// A is conditioned to max_condition_number
std::tuple<Matrix, Matrix, size_t, double, double> gensqrtinv(
    const Matrix& S, bool symmetric = false,
    double max_condition_number = 1e8) {
  Eigen::SelfAdjointEigenSolver<Matrix> eig_solver(S);
  auto U = eig_solver.eigenvectors();
  auto s = eig_solver.eigenvalues();
  auto s_max = s.maxCoeff();
  auto condition_number = std::min(
      s_max / std::max(s.minCoeff(), std::numeric_limits<double>::min()),
      1.0 / std::numeric_limits<double>::epsilon());
  auto threshold = s_max / max_condition_number;
  long n = s.rows();
  long n_cond = 0;
  // count eigenvalues that survive the conditioning threshold
  for (long i = n - 1; i >= 0; --i) {
    if (s(i) >= threshold) {
      ++n_cond;
    } else
      i = 0;  // skip rest since eigenvalues are in ascending order
  }

  auto sigma = s.bottomRows(n_cond);
  auto result_condition_number = sigma.maxCoeff() / sigma.minCoeff();
  auto sigma_sqrt = sigma.array().sqrt().matrix().asDiagonal();
  auto sigma_invsqrt = sigma.array().sqrt().inverse().matrix().asDiagonal();

  // make canonical X/Xinv
  auto U_cond = U.block(0, n - n_cond, n, n_cond);
  Matrix X = U_cond * sigma_invsqrt;
  Matrix Xinv = U_cond * sigma_sqrt;
  // convert to symmetric, if needed
  if (symmetric) {
    X = X * U_cond.transpose();
    Xinv = Xinv * U_cond.transpose();
  }
  return std::make_tuple(X, Xinv, size_t(n_cond), condition_number,
                         result_condition_number);
}

// Computes a conditioned orthogonalizer X for the overlap S (via gensqrtinv);
// returns {X, X^{-1}, condition number of the conditioned problem}.
std::tuple<Matrix, Matrix, double> conditioning_orthogonalizer(
    const Matrix& S, double S_condition_number_threshold) {
  size_t obs_rank;
  double S_condition_number;
  double XtX_condition_number;
  Matrix X, Xinv;

  assert(S.rows() == S.cols());

  std::tie(X, Xinv, obs_rank, S_condition_number, XtX_condition_number) =
      gensqrtinv(S, false, S_condition_number_threshold);
  auto obs_nbf_omitted = (long)S.rows() - (long)obs_rank;
  std::cout << "overlap condition number = " << S_condition_number;
  if
(obs_nbf_omitted > 0)
    std::cout << " (dropped " << obs_nbf_omitted << " "
              << (obs_nbf_omitted > 1 ? "fns" : "fn") << " to reduce to "
              << XtX_condition_number << ")";
  std::cout << std::endl;

  if (obs_nbf_omitted > 0) {
    // sanity check: X should orthogonalize S
    Matrix should_be_I = X.transpose() * S * X;
    Matrix I = Matrix::Identity(should_be_I.rows(), should_be_I.cols());
    std::cout << "||X^t * S * X - I||_2 = " << (should_be_I - I).norm()
              << " (should be 0)" << std::endl;
  }

  return std::make_tuple(X, Xinv, XtX_condition_number);
}

// Computes the 2-body 2-index Coulomb integrals matrix (i|j) over basis bs,
// e.g. the metric matrix used in density fitting.
Matrix compute_2body_2index_ints(const BasisSet& bs) {
  using libint2::nthreads;
  const auto n = bs.nbf();
  const auto nshells = bs.size();
  Matrix result = Matrix::Zero(n, n);

  // build engines for each thread
  using libint2::Engine;
  std::vector<Engine> engines(nthreads);
  engines[0] =
      Engine(libint2::Operator::coulomb, bs.max_nprim(), bs.max_l(), 0);
  engines[0].set(BraKet::xs_xs);
  for (size_t i = 1; i != nthreads; ++i) {
    engines[i] = engines[0];
  }

  auto shell2bf = bs.shell2bf();
  auto unitshell = Shell::unit();

  auto compute = [&](int thread_id) {
    auto& engine = engines[thread_id];
    const auto& buf = engine.results();

    // loop over unique shell pairs, {s1,s2} such that s1 >= s2
    // this is due to the permutational symmetry of the real integrals over
    // Hermitian operators: (1|2) = (2|1)
    for (auto s1 = 0l, s12 = 0l; s1 != nshells; ++s1) {
      auto bf1 = shell2bf[s1];  // first basis function in this shell
      auto n1 = bs[s1].size();

      for (auto s2 = 0; s2 <= s1; ++s2, ++s12) {
        if (s12 % nthreads != thread_id) continue;

        auto bf2 = shell2bf[s2];
        auto n2 = bs[s2].size();

        // compute shell pair; return is the pointer to the buffer
        engine.compute(bs[s1], bs[s2]);
        if (buf[0] == nullptr)
          continue;  // if all integrals screened out, skip to next shell set

        // "map" buffer to a const Eigen Matrix, and copy it to the
        // corresponding blocks of the result
        Eigen::Map<const Matrix> buf_mat(buf[0], n1, n2);
        result.block(bf1, bf2, n1, n2) = buf_mat;
        if (s1 != s2)  // if s1 >= s2, copy {s1,s2} to the corresponding {s2,s1}
                       // block, note the transpose!
          result.block(bf2, bf1, n2, n1) = buf_mat.transpose();
      }
    }
  };  // compute lambda

  libint2::parallel_do(compute);

  return result;
}

// Builds the 2-body (Coulomb + exchange) contribution to the closed-shell Fock
// matrix for density D. Engine precision is derived from `precision` and the
// density norm; shell quartets are screened with the `Schwarz` bound matrix
// when it is nonempty.
Matrix compute_2body_fock(const BasisSet& obs, const Matrix& D,
                          double precision, const Matrix& Schwarz) {
  const auto n = obs.nbf();
  const auto nshells = obs.size();
  using libint2::nthreads;
  std::vector<Matrix> G(nthreads, Matrix::Zero(n, n));

  const auto do_schwarz_screen = Schwarz.cols() != 0 && Schwarz.rows() != 0;
  Matrix D_shblk_norm =
      compute_shellblock_norm(obs, D);  // matrix of infty-norms of shell blocks

  auto fock_precision = precision;
  // engine precision controls primitive truncation, assume worst-case scenario
  // (all primitive combinations add up constructively)
  auto max_nprim = obs.max_nprim();
  auto max_nprim4 = max_nprim * max_nprim * max_nprim * max_nprim;
  auto engine_precision = std::min(fock_precision / D_shblk_norm.maxCoeff(),
                                   std::numeric_limits<double>::epsilon()) /
                          max_nprim4;

  // construct the 2-electron repulsion integrals engine pool
  using libint2::Engine;
  std::vector<Engine> engines(nthreads);
  engines[0] = Engine(Operator::coulomb, obs.max_nprim(), obs.max_l(), 0);
  engines[0].set_precision(engine_precision);  // shellset-dependent precision
                                               // control will likely break
                                               // positive definiteness
                                               // stick with this simple recipe
  std::cout << "compute_2body_fock:precision = " << precision << std::endl;
  std::cout << "Engine::precision = " << engines[0].precision() << std::endl;
  for (size_t i = 1; i != nthreads; ++i) {
    engines[i] = engines[0];
  }
  std::atomic<size_t> num_ints_computed{0};

#if defined(REPORT_INTEGRAL_TIMINGS)
  std::vector<libint2::Timers<1>> timers(nthreads);
#endif

  auto shell2bf = obs.shell2bf();

  auto lambda = [&](int thread_id) {
    auto& engine = engines[thread_id];
    auto& g = G[thread_id];
    const auto& buf = engine.results();

#if defined(REPORT_INTEGRAL_TIMINGS)
    auto& timer = timers[thread_id];
    timer.clear();
    timer.set_now_overhead(25);
#endif

    // loop over permutationally-unique set of shells
    for (auto s1 = 0l, s1234 = 0l; s1 != nshells; ++s1) {
      auto bf1_first = shell2bf[s1];  // first basis function in this shell
      auto n1 = obs[s1].size();       // number of basis functions in this shell

      for (const auto& s2 : obs_shellpair_list[s1]) {
        auto bf2_first = shell2bf[s2];
        auto n2 = obs[s2].size();

        const auto Dnorm12 = do_schwarz_screen ? D_shblk_norm(s1, s2) : 0.;

        for (auto s3 = 0; s3 <= s1; ++s3) {
          auto bf3_first = shell2bf[s3];
          auto n3 = obs[s3].size();

          const auto Dnorm123 =
              do_schwarz_screen
                  ? std::max(D_shblk_norm(s1, s3),
                             std::max(D_shblk_norm(s2, s3), Dnorm12))
                  : 0.;

          const auto s4_max = (s1 == s3) ? s2 : s3;
          for (const auto& s4 : obs_shellpair_list[s3]) {
            if (s4 > s4_max)
              break;  // for each s3, s4 are stored in monotonically increasing
                      // order

            if ((s1234++) % nthreads != thread_id) continue;

            const auto Dnorm1234 =
                do_schwarz_screen
                    ? std::max(
                          D_shblk_norm(s1, s4),
                          std::max(D_shblk_norm(s2, s4),
                                   std::max(D_shblk_norm(s3, s4), Dnorm123)))
                    : 0.;

            // Schwarz bound: |(12|34)| <= K(1,2) K(3,4); weight by density
            if (do_schwarz_screen &&
                Dnorm1234 * Schwarz(s1, s2) * Schwarz(s3, s4) < fock_precision)
              continue;

            auto bf4_first = shell2bf[s4];
            auto n4 = obs[s4].size();

            num_ints_computed += n1 * n2 * n3 * n4;

            // compute the permutational degeneracy (i.e. # of equivalents) of
            // the given shell set
            auto s12_deg = (s1 == s2) ? 1.0 : 2.0;
            auto s34_deg = (s3 == s4) ? 1.0 : 2.0;
            auto s12_34_deg = (s1 == s3) ? (s2 == s4 ?
1.0 : 2.0) : 2.0; auto s1234_deg = s12_deg * s34_deg * s12_34_deg; #if defined(REPORT_INTEGRAL_TIMINGS) timer.start(0); #endif engine.compute2<Operator::coulomb, BraKet::xx_xx, 0>( obs[s1], obs[s2], obs[s3], obs[s4]); const auto* buf_1234 = buf[0]; if (buf_1234 == nullptr) continue; // if all integrals screened out, skip to next quartet #if defined(REPORT_INTEGRAL_TIMINGS) timer.stop(0); #endif Eigen::Map<MatrixXd> buf_1234_map(buf_1234, n12, n34); assert(buf_1234_map.norm() < Schwarz(s1, s2) * Schwarz(s3, s4)); // 1) each shell set of integrals contributes up to 6 shell sets of // the Fock matrix: // F(a,b) += (ab|cd) * D(c,d) // F(c,d) += (ab|cd) * D(a,b) // F(b,d) -= 1/4 * (ab|cd) * D(a,c) // F(b,c) -= 1/4 * (ab|cd) * D(a,d) // F(a,c) -= 1/4 * (ab|cd) * D(b,d) // F(a,d) -= 1/4 * (ab|cd) * D(b,c) // 2) each permutationally-unique integral (shell set) must be // scaled by its degeneracy, // i.e. the number of the integrals/sets equivalent to it // 3) the end result must be symmetrized for (auto f1 = 0, f1234 = 0; f1 != n1; ++f1) { const auto bf1 = f1 + bf1_first; for (auto f2 = 0; f2 != n2; ++f2) { const auto bf2 = f2 + bf2_first; for (auto f3 = 0; f3 != n3; ++f3) { const auto bf3 = f3 + bf3_first; for (auto f4 = 0; f4 != n4; ++f4, ++f1234) { const auto bf4 = f4 + bf4_first; const auto value = buf_1234[f1234]; const auto value_scal_by_deg = value * s1234_deg; g(bf1, bf2) += D(bf3, bf4) * value_scal_by_deg; g(bf3, bf4) += D(bf1, bf2) * value_scal_by_deg; g(bf1, bf3) -= 0.25 * D(bf2, bf4) * value_scal_by_deg; g(bf2, bf4) -= 0.25 * D(bf1, bf3) * value_scal_by_deg; g(bf1, bf4) -= 0.25 * D(bf2, bf3) * value_scal_by_deg; g(bf2, bf3) -= 0.25 * D(bf1, bf4) * value_scal_by_deg; } } } } } } } } }; // end of lambda libint2::parallel_do(lambda); // accumulate contributions from all threads for (size_t i = 1; i != nthreads; ++i) { G[0] += G[i]; } #if defined(REPORT_INTEGRAL_TIMINGS) double time_for_ints = 0.0; for (auto& t : timers) { time_for_ints += t.read(0); } std::cout << 
"time for integrals = " << time_for_ints << std::endl; for (int t = 0; t != nthreads; ++t) engines[t].print_timers(); #endif Matrix GG = 0.5 * (G[0] + G[0].transpose()); std::cout << "# of integrals = " << num_ints_computed << std::endl; // symmetrize the result and return return GG; } #if LIBINT2_DERIV_ERI_ORDER template <unsigned deriv_order> std::vector<Matrix> compute_2body_fock_deriv(const BasisSet& obs, const std::vector<libint2::Atom>& atoms, const Matrix& D, double precision, const Matrix& Schwarz) { const auto n = obs.nbf(); const auto nshells = obs.size(); const auto nderiv_shellset = libint2::num_geometrical_derivatives(4, deriv_order); // # of derivs for each shell quartet const auto nderiv = libint2::num_geometrical_derivatives( atoms.size(), deriv_order); // total # of derivs const auto ncoords_times_two = (atoms.size() * 3) * 2; using libint2::nthreads; std::vector<Matrix> G(nthreads * nderiv, Matrix::Zero(n, n)); const auto do_schwarz_screen = Schwarz.cols() != 0 && Schwarz.rows() != 0; Matrix D_shblk_norm = compute_shellblock_norm(obs, D); // matrix of infty-norms of shell blocks auto fock_precision = precision; // engine precision controls primitive truncation, assume worst-case scenario // (all primitive combinations add up constructively) auto max_nprim = obs.max_nprim(); auto max_nprim4 = max_nprim * max_nprim * max_nprim * max_nprim; auto engine_precision = std::min(fock_precision / D_shblk_norm.maxCoeff(), std::numeric_limits<double>::epsilon()) / max_nprim4; // construct the 2-electron repulsion integrals engine pool using libint2::Engine; std::vector<Engine> engines(nthreads); engines[0] = Engine(Operator::coulomb, obs.max_nprim(), obs.max_l(), deriv_order); engines[0].set_precision(engine_precision); // shellset-dependent precision // control will likely break // positive definiteness // stick with this simple recipe std::cout << "compute_2body_fock:precision = " << precision << std::endl; std::cout << "Engine::precision = " << 
engines[0].precision() << std::endl; for (size_t i = 1; i != nthreads; ++i) { engines[i] = engines[0]; } std::atomic<size_t> num_ints_computed{0}; #if defined(REPORT_INTEGRAL_TIMINGS) std::vector<libint2::Timers<1>> timers(nthreads); #endif auto shell2bf = obs.shell2bf(); auto shell2atom = obs.shell2atom(atoms); auto lambda = [&](int thread_id) { auto& engine = engines[thread_id]; const auto& buf = engine.results(); #if defined(REPORT_INTEGRAL_TIMINGS) auto& timer = timers[thread_id]; timer.clear(); timer.set_now_overhead(25); #endif size_t shell_atoms[4]; // loop over permutationally-unique set of shells for (auto s1 = 0l, s1234 = 0l; s1 != nshells; ++s1) { auto bf1_first = shell2bf[s1]; // first basis function in this shell auto n1 = obs[s1].size(); // number of basis functions in this shell shell_atoms[0] = shell2atom[s1]; for (const auto& s2 : obs_shellpair_list[s1]) { auto bf2_first = shell2bf[s2]; auto n2 = obs[s2].size(); shell_atoms[1] = shell2atom[s2]; const auto Dnorm12 = do_schwarz_screen ? D_shblk_norm(s1, s2) : 0.; for (auto s3 = 0; s3 <= s1; ++s3) { auto bf3_first = shell2bf[s3]; auto n3 = obs[s3].size(); shell_atoms[2] = shell2atom[s3]; const auto Dnorm123 = do_schwarz_screen ? std::max(D_shblk_norm(s1, s3), std::max(D_shblk_norm(s2, s3), Dnorm12)) : 0.; const auto s4_max = (s1 == s3) ? s2 : s3; for (const auto& s4 : obs_shellpair_list[s3]) { if (s4 > s4_max) break; // for each s3, s4 are stored in monotonically increasing // order if ((s1234++) % nthreads != thread_id) continue; const auto Dnorm1234 = do_schwarz_screen ? std::max( D_shblk_norm(s1, s4), std::max(D_shblk_norm(s2, s4), std::max(D_shblk_norm(s3, s4), Dnorm123))) : 0.; if (do_schwarz_screen && Dnorm1234 * Schwarz(s1, s2) * Schwarz(s3, s4) < fock_precision) continue; auto bf4_first = shell2bf[s4]; auto n4 = obs[s4].size(); shell_atoms[3] = shell2atom[s4]; const auto n1234 = n1 * n2 * n3 * n4; // compute the permutational degeneracy (i.e. 
# of equivalents) of // the given shell set auto s12_deg = (s1 == s2) ? 1.0 : 2.0; auto s34_deg = (s3 == s4) ? 1.0 : 2.0; auto s12_34_deg = (s1 == s3) ? (s2 == s4 ? 1.0 : 2.0) : 2.0; auto s1234_deg = s12_deg * s34_deg * s12_34_deg; // computes contribution from shell set \c idx to the operator matrix with // index \c op auto add_shellset_to_dest = [&]( std::size_t op, std::size_t idx, int coord1, int coord2, double scale = 1.0) { auto& g = G[op]; auto shset = buf[idx]; const auto weight = scale * s1234_deg; for (auto f1 = 0, f1234 = 0; f1 != n1; ++f1) { const auto bf1 = f1 + bf1_first; for (auto f2 = 0; f2 != n2; ++f2) { const auto bf2 = f2 + bf2_first; for (auto f3 = 0; f3 != n3; ++f3) { const auto bf3 = f3 + bf3_first; for (auto f4 = 0; f4 != n4; ++f4, ++f1234) { const auto bf4 = f4 + bf4_first; const auto value = shset[f1234]; const auto wvalue = value * weight; g(bf1, bf2) += D(bf3, bf4) * wvalue; g(bf3, bf4) += D(bf1, bf2) * wvalue; g(bf1, bf3) -= 0.25 * D(bf2, bf4) * wvalue; g(bf2, bf4) -= 0.25 * D(bf1, bf3) * wvalue; g(bf1, bf4) -= 0.25 * D(bf2, bf3) * wvalue; g(bf2, bf3) -= 0.25 * D(bf1, bf4) * wvalue; } } } } }; #if defined(REPORT_INTEGRAL_TIMINGS) timer.start(0); #endif engine.compute2<Operator::coulomb, BraKet::xx_xx, deriv_order>( obs[s1], obs[s2], obs[s3], obs[s4]); if (buf[0] == nullptr) continue; // if all integrals screened out, skip to next quartet num_ints_computed += nderiv_shellset * n1234; #if defined(REPORT_INTEGRAL_TIMINGS) timer.stop(0); #endif switch (deriv_order) { case 0: { int coord1 = 0, coord2 = 0; add_shellset_to_dest(thread_id, 0, coord1, coord2); } break; case 1: { for (auto d = 0; d != 12; ++d) { const int a = d / 3; const int xyz = d % 3; auto coord = shell_atoms[a] * 3 + xyz; auto& g = G[thread_id * nderiv + coord]; int coord1 = 0, coord2 = 0; add_shellset_to_dest(thread_id * nderiv + coord, d, coord1, coord2); } // d \in [0,12) } break; case 2: { // computes upper triangle index // n2 = matrix size times 2 // i,j = (unordered) 
indices #define upper_triangle_index(n2, i, j) \ (std::min((i), (j))) * ((n2) - (std::min((i), (j))) - 1) / 2 + \ (std::max((i), (j))) // look over shellsets in the order in which they appear std::size_t shellset_idx = 0; for (auto c1 = 0; c1 != 4; ++c1) { auto a1 = shell_atoms[c1]; auto coord1 = 3 * a1; for (auto xyz1 = 0; xyz1 != 3; ++xyz1, ++coord1) { for (auto c2 = c1; c2 != 4; ++c2) { auto a2 = shell_atoms[c2]; auto xyz2_start = (c1 == c2) ? xyz1 : 0; auto coord2 = 3 * a2 + xyz2_start; for (auto xyz2 = xyz2_start; xyz2 != 3; ++xyz2, ++coord2) { double scale = (coord1 == coord2 && c1 != c2) ? 2.0 : 1.0; const auto coord12 = upper_triangle_index( ncoords_times_two, coord1, coord2); auto op = thread_id * nderiv + coord12; add_shellset_to_dest(op, shellset_idx, coord1, coord2, scale); ++shellset_idx; } } } } } break; #undef upper_triangle_index default: assert(deriv_order <= 2 && "support for 3rd and higher derivatives of the Fock " "matrix not yet implemented"); } } } } } }; // end of lambda libint2::parallel_do(lambda); // accumulate contributions from all threads for (size_t t = 1; t != nthreads; ++t) { for (auto d = 0; d != nderiv; ++d) { G[d] += G[t * nderiv + d]; } } #if defined(REPORT_INTEGRAL_TIMINGS) double time_for_ints = 0.0; for (auto& t : timers) { time_for_ints += t.read(0); } std::cout << "time for integrals = " << time_for_ints << std::endl; for (int t = 0; t != nthreads; ++t) engines[t].print_timers(); #endif std::vector<Matrix> GG(nderiv); for (auto d = 0; d != nderiv; ++d) { GG[d] = 0.5 * (G[d] + G[d].transpose()); } std::cout << "# of integrals = " << num_ints_computed << std::endl; // symmetrize the result and return return GG; } #endif Matrix compute_2body_fock_general(const BasisSet& obs, const Matrix& D, const BasisSet& D_bs, bool D_is_shelldiagonal, double precision) { const auto n = obs.nbf(); const auto nshells = obs.size(); const auto n_D = D_bs.nbf(); assert(D.cols() == D.rows() && D.cols() == n_D); using libint2::nthreads; 
std::vector<Matrix> G(nthreads, Matrix::Zero(n, n)); // construct the 2-electron repulsion integrals engine using libint2::Engine; std::vector<Engine> engines(nthreads); engines[0] = Engine(libint2::Operator::coulomb, std::max(obs.max_nprim(), D_bs.max_nprim()), std::max(obs.max_l(), D_bs.max_l()), 0); engines[0].set_precision(precision); // shellset-dependent precision control // will likely break positive // definiteness // stick with this simple recipe for (size_t i = 1; i != nthreads; ++i) { engines[i] = engines[0]; } auto shell2bf = obs.shell2bf(); auto shell2bf_D = D_bs.shell2bf(); auto lambda = [&](int thread_id) { auto& engine = engines[thread_id]; auto& g = G[thread_id]; const auto& buf = engine.results(); // loop over permutationally-unique set of shells for (auto s1 = 0l, s1234 = 0l; s1 != nshells; ++s1) { auto bf1_first = shell2bf[s1]; // first basis function in this shell auto n1 = obs[s1].size(); // number of basis functions in this shell for (auto s2 = 0; s2 <= s1; ++s2) { auto bf2_first = shell2bf[s2]; auto n2 = obs[s2].size(); for (auto s3 = 0; s3 < D_bs.size(); ++s3) { auto bf3_first = shell2bf_D[s3]; auto n3 = D_bs[s3].size(); auto s4_begin = D_is_shelldiagonal ? s3 : 0; auto s4_fence = D_is_shelldiagonal ? s3 + 1 : D_bs.size(); for (auto s4 = s4_begin; s4 != s4_fence; ++s4, ++s1234) { if (s1234 % nthreads != thread_id) continue; auto bf4_first = shell2bf_D[s4]; auto n4 = D_bs[s4].size(); // compute the permutational degeneracy (i.e. # of equivalents) of // the given shell set auto s12_deg = (s1 == s2) ? 1.0 : 2.0; if (s3 >= s4) { auto s34_deg = (s3 == s4) ? 
1.0 : 2.0; auto s1234_deg = s12_deg * s34_deg; // auto s1234_deg = s12_deg; engine.compute2<Operator::coulomb, BraKet::xx_xx, 0>( obs[s1], obs[s2], D_bs[s3], D_bs[s4]); const auto* buf_1234 = buf[0]; if (buf_1234 != nullptr) { for (auto f1 = 0, f1234 = 0; f1 != n1; ++f1) { const auto bf1 = f1 + bf1_first; for (auto f2 = 0; f2 != n2; ++f2) { const auto bf2 = f2 + bf2_first; for (auto f3 = 0; f3 != n3; ++f3) { const auto bf3 = f3 + bf3_first; for (auto f4 = 0; f4 != n4; ++f4, ++f1234) { const auto bf4 = f4 + bf4_first; const auto value = buf_1234[f1234]; const auto value_scal_by_deg = value * s1234_deg; g(bf1, bf2) += 2.0 * D(bf3, bf4) * value_scal_by_deg; } } } } } } engine.compute2<Operator::coulomb, BraKet::xx_xx, 0>( obs[s1], D_bs[s3], obs[s2], D_bs[s4]); const auto* buf_1324 = buf[0]; if (buf_1324 == nullptr) continue; // if all integrals screened out, skip to next quartet for (auto f1 = 0, f1324 = 0; f1 != n1; ++f1) { const auto bf1 = f1 + bf1_first; for (auto f3 = 0; f3 != n3; ++f3) { const auto bf3 = f3 + bf3_first; for (auto f2 = 0; f2 != n2; ++f2) { const auto bf2 = f2 + bf2_first; for (auto f4 = 0; f4 != n4; ++f4, ++f1324) { const auto bf4 = f4 + bf4_first; const auto value = buf_1324[f1324]; const auto value_scal_by_deg = value * s12_deg; g(bf1, bf2) -= D(bf3, bf4) * value_scal_by_deg; } } } } } } } } }; // thread lambda libint2::parallel_do(lambda); // accumulate contributions from all threads for (size_t i = 1; i != nthreads; ++i) { G[0] += G[i]; } // symmetrize the result and return return 0.5 * (G[0] + G[0].transpose()); } #ifdef HAVE_DENSITY_FITTING Matrix DFFockEngine::compute_2body_fock_dfC(const Matrix& Cocc) { using libint2::nthreads; const auto n = obs.nbf(); const auto ndf = dfbs.nbf(); libint2::Timers<1> wall_timer; wall_timer.set_now_overhead(25); std::vector<libint2::Timers<5>> timers(nthreads); for(auto& timer: timers) timer.set_now_overhead(25); typedef btas::RangeNd<CblasRowMajor, std::array<long, 1>> Range1d; typedef 
btas::RangeNd<CblasRowMajor, std::array<long, 2>> Range2d; typedef btas::Tensor<double, Range1d> Tensor1d; typedef btas::Tensor<double, Range2d> Tensor2d; // using first time? compute 3-center ints and transform to inv sqrt // representation if (xyK.size() == 0) { wall_timer.start(0); const auto nshells = obs.size(); const auto nshells_df = dfbs.size(); const auto& unitshell = libint2::Shell::unit(); // construct the 2-electron 3-center repulsion integrals engine // since the code assumes (xx|xs) braket, and Engine/libint only produces // (xs|xx), use 4-center engine std::vector<libint2::Engine> engines(nthreads); engines[0] = libint2::Engine(libint2::Operator::coulomb, std::max(obs.max_nprim(), dfbs.max_nprim()), std::max(obs.max_l(), dfbs.max_l()), 0); engines[0].set(BraKet::xs_xx); for (size_t i = 1; i != nthreads; ++i) { engines[i] = engines[0]; } auto shell2bf = obs.shell2bf(); auto shell2bf_df = dfbs.shell2bf(); Tensor3d Zxy{ndf, n, n}; auto lambda = [&](int thread_id) { auto& engine = engines[thread_id]; auto& timer = timers[thread_id]; const auto& results = engine.results(); // loop over permutationally-unique set of shells for (auto s1 = 0l, s123 = 0l; s1 != nshells_df; ++s1) { auto bf1_first = shell2bf_df[s1]; // first basis function in this shell auto n1 = dfbs[s1].size(); // number of basis functions in this shell for (auto s2 = 0; s2 != nshells; ++s2) { auto bf2_first = shell2bf[s2]; auto n2 = obs[s2].size(); const auto n12 = n1 * n2; for (auto s3 = 0; s3 != nshells; ++s3, ++s123) { if (s123 % nthreads != thread_id) continue; auto bf3_first = shell2bf[s3]; auto n3 = obs[s3].size(); const auto n123 = n12 * n3; timer.start(0); engine.compute2<Operator::coulomb, BraKet::xs_xx, 0>( dfbs[s1], unitshell, obs[s2], obs[s3]); const auto* buf = results[0]; if (buf == nullptr) continue; timer.stop(0); timer.start(1); auto lower_bound = {bf1_first, bf2_first, bf3_first}; auto upper_bound = {bf1_first + n1, bf2_first + n2, bf3_first + n3}; auto view = 
btas::make_view( Zxy.range().slice(lower_bound, upper_bound), Zxy.storage()); std::copy(buf, buf + n123, view.begin()); timer.stop(1); } // s3 } // s2 } // s1 }; // lambda libint2::parallel_do(lambda); wall_timer.stop(0); double ints_time = 0; for(const auto& timer: timers) ints_time += timer.read(0); std::cout << "time for Zxy integrals = " << ints_time << " (total from all threads)" << std::endl; double copy_time = 0; for(const auto& timer: timers) copy_time += timer.read(1); std::cout << "time for copying into BTAS = " << copy_time << " (total from all threads)"<< std::endl; std::cout << "wall time for Zxy integrals + copy = " << wall_timer.read(0) << std::endl; timers[0].start(2); Matrix V = compute_2body_2index_ints(dfbs); Eigen::LLT<Matrix> V_LLt(V); Matrix I = Matrix::Identity(ndf, ndf); auto L = V_LLt.matrixL(); Matrix V_L = L; Matrix Linv = L.solve(I).transpose(); // check // std::cout << "||V - L L^t|| = " << (V - V_L * V_L.transpose()).norm() << // std::endl; // std::cout << "||I - L L^-1^t|| = " << (I - V_L * // Linv.transpose()).norm() << std::endl; // std::cout << "||V^-1 - L^-1 L^-1^t|| = " << (V.inverse() - Linv * // Linv.transpose()).norm() << std::endl; Tensor2d K{ndf, ndf}; std::copy(Linv.data(), Linv.data() + ndf * ndf, K.begin()); xyK = Tensor3d{n, n, ndf}; btas::contract(1.0, Zxy, {1, 2, 3}, K, {1, 4}, 0.0, xyK, {2, 3, 4}); Zxy = Tensor3d{0, 0, 0}; // release memory timers[0].stop(2); std::cout << "time for integrals metric tform = " << timers[0].read(2) << std::endl; } // if (xyK.size() == 0) // compute exchange timers[0].start(3); const auto nocc = Cocc.cols(); Tensor2d Co{n, nocc}; std::copy(Cocc.data(), Cocc.data() + n * nocc, Co.begin()); Tensor3d xiK{n, nocc, ndf}; btas::contract(1.0, xyK, {1, 2, 3}, Co, {2, 4}, 0.0, xiK, {1, 4, 3}); Tensor2d G{n, n}; btas::contract(1.0, xiK, {1, 2, 3}, xiK, {4, 2, 3}, 0.0, G, {1, 4}); timers[0].stop(3); std::cout << "time for exchange = " << timers[0].read(3) << std::endl; // compute Coulomb 
timers[0].start(4); Tensor1d Jtmp{ndf}; btas::contract(1.0, xiK, {1, 2, 3}, Co, {1, 2}, 0.0, Jtmp, {3}); xiK = Tensor3d{0, 0, 0}; btas::contract(2.0, xyK, {1, 2, 3}, Jtmp, {3}, -1.0, G, {1, 2}); timers[0].stop(4); std::cout << "time for coulomb = " << timers[0].read(4) << std::endl; // copy result to an Eigen::Matrix Matrix result(n, n); std::copy(G.cbegin(), G.cend(), result.data()); return result; } #endif // HAVE_DENSITY_FITTING // should be a unit test somewhere void api_basic_compile_test(const BasisSet& obs) { using namespace libint2; Engine onebody_engine( Operator::overlap, // will compute overlap ints obs.max_nprim(), // max # of primitives in shells this engine will // accept obs.max_l() // max angular momentum of shells this engine will accept ); auto shell2bf = obs.shell2bf(); const auto& results = onebody_engine.results(); for (auto s1 = 0; s1 != obs.size(); ++s1) { for (auto s2 = 0; s2 != obs.size(); ++s2) { std::cout << "compute shell set {" << s1 << "," << s2 << "} ... "; onebody_engine.compute(obs[s1], obs[s2]); const auto* ints_shellset = results[0]; std::cout << "done" << std::endl; auto bf1 = shell2bf[s1]; // first basis function in first shell auto n1 = obs[s1].size(); // number of basis functions in first shell auto bf2 = shell2bf[s2]; // first basis function in second shell auto n2 = obs[s2].size(); // number of basis functions in second shell // this iterates over integrals in the order they are packed in array // ints_shellset for (auto f1 = 0; f1 != n1; ++f1) for (auto f2 = 0; f2 != n2; ++f2) std::cout << " " << bf1 + f1 << " " << bf2 + f2 << " " << ints_shellset[f1 * n2 + f2] << std::endl; } } using libint2::Operator; std::vector<std::pair<double, double>> cgtg_params{ {0.1, 0.2}, {0.3, 0.4}, {0.5, 0.6}}; { auto K = compute_schwarz_ints<Operator::cgtg>(obs, obs, false, cgtg_params); std::cout << "cGTG Schwarz ints\n" << K << std::endl; } { auto K = compute_schwarz_ints<Operator::cgtg_x_coulomb>(obs, obs, false, cgtg_params); std::cout << 
"cGTG/r12 Schwarz ints\n" << K << std::endl; } { auto K = compute_schwarz_ints<Operator::delcgtg2>(obs, obs, false, cgtg_params); std::cout << "||Del.cGTG||^2 Schwarz ints\n" << K << std::endl; } { // test 2-index ints Engine eri4_engine(Operator::coulomb, obs.max_nprim(), obs.max_l()); Engine eri2_engine = eri4_engine; eri2_engine.set(BraKet::xs_xs); auto shell2bf = obs.shell2bf(); const auto& results4 = eri4_engine.results(); const auto& results2 = eri2_engine.results(); for (auto s1 = 0; s1 != obs.size(); ++s1) { for (auto s2 = 0; s2 != obs.size(); ++s2) { eri4_engine.compute(obs[s1], Shell::unit(), obs[s2], Shell::unit()); eri2_engine.compute(obs[s1], obs[s2]); auto bf1 = shell2bf[s1]; // first basis function in first shell auto n1 = obs[s1].size(); // number of basis functions in first shell auto bf2 = shell2bf[s2]; // first basis function in second shell auto n2 = obs[s2].size(); // number of basis functions in second shell const auto* buf4 = results4[0]; const auto* buf2 = results2[0]; // this iterates over integrals in the order they are packed in array // ints_shellset for (auto f1 = 0, f12 = 0; f1 != n1; ++f1) for (auto f2 = 0; f2 != n2; ++f2, ++f12) assert(std::abs(buf4[f12] - buf2[f12]) < 1e-12 && "2-center ints test failed"); } } } { // test 3-index ints Engine eri4_engine(Operator::coulomb, obs.max_nprim(), obs.max_l()); Engine eri3_engine = eri4_engine; eri3_engine.set(BraKet::xs_xx); auto shell2bf = obs.shell2bf(); const auto& results4 = eri4_engine.results(); const auto& results3 = eri3_engine.results(); for (auto s1 = 0; s1 != obs.size(); ++s1) { for (auto s2 = 0; s2 != obs.size(); ++s2) { for (auto s3 = 0; s3 != obs.size(); ++s3) { eri4_engine.compute(obs[s1], Shell::unit(), obs[s2], obs[s3]); eri3_engine.compute(obs[s1], obs[s2], obs[s3]); auto bf1 = shell2bf[s1]; // first basis function in first shell auto n1 = obs[s1].size(); // number of basis functions in first shell auto bf2 = shell2bf[s2]; // first basis function in second shell auto n2 = 
obs[s2].size(); // number of basis functions in second shell auto bf3 = shell2bf[s3]; // first basis function in third shell auto n3 = obs[s3].size(); // number of basis functions in third shell const auto* buf4 = results4[0]; const auto* buf3 = results3[0]; // this iterates over integrals in the order they are packed in array // ints_shellset for (auto f1 = 0, f123 = 0; f1 != n1; ++f1) for (auto f2 = 0; f2 != n2; ++f2) for (auto f3 = 0; f3 != n3; ++f3, ++f123) assert(std::abs(buf4[f123] - buf3[f123]) < 1e-12 && "3-center ints test failed"); } } } } #if LIBINT2_DERIV_ERI_ORDER { // test deriv 2-index ints Engine eri4_engine(Operator::coulomb, obs.max_nprim(), obs.max_l(), 1); Engine eri2_engine = eri4_engine; eri2_engine.set(BraKet::xs_xs); auto shell2bf = obs.shell2bf(); const auto& results4 = eri4_engine.results(); const auto& results2 = eri2_engine.results(); for (auto s1 = 0; s1 != obs.size(); ++s1) { for (auto s2 = 0; s2 != obs.size(); ++s2) { eri4_engine.compute(obs[s1], Shell::unit(), obs[s2], Shell::unit()); eri2_engine.compute(obs[s1], obs[s2]); auto bf1 = shell2bf[s1]; // first basis function in first shell auto n1 = obs[s1].size(); // number of basis functions in first shell auto bf2 = shell2bf[s2]; // first basis function in second shell auto n2 = obs[s2].size(); // number of basis functions in second shell // loop over derivative shell sets for(auto d=0; d!=6; ++d) { const auto* buf4 = results4[d<3 ? 
d : d+3]; const auto* buf2 = results2[d]; // this iterates over integrals in the order they are packed in array // ints_shellset for (auto f1 = 0, f12 = 0; f1 != n1; ++f1) for (auto f2 = 0; f2 != n2; ++f2, ++f12) assert(std::abs(buf4[f12] - buf2[f12]) < 1e-12 && "deriv 2-center ints test failed"); } } } } #endif #if LIBINT2_DERIV_ERI_ORDER > 1 { // test 2nd deriv 2-index ints Engine eri4_engine(Operator::coulomb, obs.max_nprim(), obs.max_l(), 2); Engine eri2_engine = eri4_engine; eri2_engine.set(BraKet::xs_xs); auto shell2bf = obs.shell2bf(); const auto& results4 = eri4_engine.results(); const auto& results2 = eri2_engine.results(); for (auto s1 = 0; s1 != obs.size(); ++s1) { for (auto s2 = 0; s2 != obs.size(); ++s2) { eri4_engine.compute(obs[s1], Shell::unit(), obs[s2], Shell::unit()); eri2_engine.compute(obs[s1], obs[s2]); auto bf1 = shell2bf[s1]; // first basis function in first shell auto n1 = obs[s1].size(); // number of basis functions in first shell auto bf2 = shell2bf[s2]; // first basis function in second shell auto n2 = obs[s2].size(); // number of basis functions in second shell // loop over derivative shell sets for (auto d1 = 0, d12 = 0; d1 != 6; ++d1) { const auto dd1 = d1 < 3 ? d1 : d1 + 3; for (auto d2 = d1; d2 != 6; ++d2, ++d12) { const auto dd2 = d2 < 3 ? d2 : d2 + 3; const auto dd12 = dd1 * (24 - dd1 - 1) / 2 + dd2; const auto* buf4 = results4[dd12]; const auto* buf2 = results2[d12]; // this iterates over integrals in the order they are packed in // array // ints_shellset for (auto f1 = 0, f12 = 0; f1 != n1; ++f1) for (auto f2 = 0; f2 != n2; ++f2, ++f12) assert(std::abs(buf4[f12] - buf2[f12]) < 1e-12 && "2nd deriv 2-center ints test failed"); } } } } } #endif }
/* ===== file: example_09-StructOfArrays-CellLinkedList-OuterLoop-LoadBalanced.c ===== */
/* * SPDX-License-Identifier: BSD-3-Clause * * example_09-StructOfArrays-CellLinkedList-OuterLoop-LoadBalanced.c : * Example of SPH Density Calculation using * fast neighbor search the main density loop via * Cell Linked List method, Struct of Arrays (SoA) * data layout, OpenMP parallelization at the * cell-pair level, SIMD directives in the kernel * and in the inner-most loop. It also implements * load balancing by moving the parallelism from * iterating over cells to iterate over cell pairs. * * (C) Copyright 2021 José Hugo Elsas * Author: José Hugo Elsas <jhelsas@gmail.com> * * Command Line Options: * -runs <int> : Set the number of repetitions (runs) for * calculating the density. The value of * the density is based on the last * iteration. * Default value: 1 * -run_seed <int>: Flag to set an alternative seed use for * for the PRNG. Instead of feeding seed * to the PRNG directly, it feeds * seed + iteration, as to generate different * configurations for each iteration. * Default value: 0 - (possible 0/1) * -seed <int>: Set the seed to use for the SPH particles * uniform position generation in the box * Default value: 123123123 * * -N <int>: Set the number of SPH particles to be used * Default value: 1e5 = 100,000 * -h <float>: Set the value of the smoothing kernel * parameter h, which corresponds to half * of the support of the kernel. 
* Default value: 0.05 * * -Nx <int>: Set the number of Cells in the X direction * Default value: 10 * -Ny <int>: Set the number of Cells in the Y direction * Default value: 10 * -Nz <int>: Set the number of Cells in the Z direction * Default value: 10 * * -Xmin <float>: Set the lower bound in the X direction for * the Cell Linked List box * Default value: 0.0 * -Ymin <float>: Set the lower bound in the Y direction for * the Cell Linked List box * Default value: 0.0 * -Ymin <float>: Set the lower bound in the Z direction for * the Cell Linked List box * Default value: 0.0 * * -Xmax <float>: Set the lower bound in the X direction for * the Cell Linked List box * Default value: 1.0 * -Ymax <float>: Set the lower bound in the Y direction for * the Cell Linked List box * Default value: 1.0 * -Zmax <float>: Set the lower bound in the Z direction for * the Cell Linked List box * Default value: 1.0 */ #include <math.h> #include <ctype.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <limits.h> #include <unistd.h> #include <stdbool.h> #include <sys/time.h> #include <inttypes.h> #include <omp.h> #include <gsl/gsl_math.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_heapsort.h> #include "sph_data_types.h" #include "sph_linked_list.h" #include "sph_utils.h" #ifndef M_PI #define M_PI (3.14159265358979323846) #endif #define COMPUTE_BLOCKS 5 int main_loop(int run, bool run_seed, int64_t N, double h, long int seed, void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times); int compute_density_3d_load_ballanced(int N, double h, SPHparticle *lsph, linkedListBox *box); int compute_density_3d_chunk_noomp(int64_t node_begin, int64_t node_end, int64_t nb_begin, int64_t nb_end,double h, double* restrict x, double* restrict y, double* restrict z, double* restrict nu, double* restrict rho); double w_bspline_3d_constant(double h); #pragma omp declare simd double w_bspline_3d_simd(double q); int main(int argc, char **argv){ bool 
run_seed = false; // By default the behavior is is to use the same seed int runs = 1,err; // it only runs once long int seed = 123123123; // The default seed is 123123123 int64_t N = 100000; // The default number of particles is N = 1e5 = 100,000 double h=0.05; // The default kernel smoothing length is h = 0.05 linkedListBox *box; // Uninitialized Box containing the cells for the cell linked list method SPHparticle *lsph; // Uninitialized array of SPH particles box = (linkedListBox*)malloc(1*sizeof(linkedListBox)); // Create a box representing the entire 3d domain // allow for command line customization of the run arg_parse(argc,argv,&N,&h,&seed,&runs,&run_seed,box); // Parse the command line options // line arguments and override default values err = SPHparticle_SoA_malloc(N,&lsph); if(err) fprintf(stderr,"error in SPHparticle_SoA_malloc\n"); void *swap_arr = malloc(N*sizeof(double)); double times[runs*COMPUTE_BLOCKS]; for(int run=0;run<runs;run+=1) main_loop(run,run_seed,N,h,seed,swap_arr,box,lsph,times); bool is_cll = true; const char *prefix = "ex09,cll,SoA,outer,simd,loadBallance"; print_time_stats(prefix,is_cll,N,h,seed,runs,lsph,box,times); print_sph_particles_density(prefix,is_cll,N,h,seed,runs,lsph,box); SPHparticleSOA_safe_free(N,&lsph); safe_free_box(box); free(swap_arr); return 0; } /* * Function main_loop: * Runs the main loop of the program, including the particle array generation, * density calculation and the timings annotations. 
 *
 * Arguments:
 *    run <int>            : index of the present iteration (used to vary the seed)
 *    run_seed <bool>      : boolean defining whether to use run index for seed or not
 *    N <int>              : Number of SPH particles to be used in the run
 *    h <double>           : Smoothing Length for the Smoothing Kernel w_bspline
 *    seed <long int>      : seed for GSL PRNG generator to generate particle positions
 *    swap_arr <void*>     : scratch buffer (N doubles) used while reordering the SoA arrays
 *    box <linkedListBox>  : Box of linked list cells, encapsulating the 3d domain
 *    lsph <SPHparticle>   : Array (pointer) of SPH particles to be updated
 *    times <double>       : Array to store the computation timings to be updated
 * Returns:
 *    0 : error code returned
 *    lsph <SPHparticle>   : SPH particle array is updated in the rho field by reference
 *    times <double>       : Times is updated by reference
 */
int main_loop(int run, bool run_seed, int64_t N, double h, long int seed,
              void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times)
{
  int err;

  // Generate particle positions; optionally perturb the seed by the run index
  // so each repetition sees a different configuration.
  if(run_seed)
    err = gen_unif_rdn_pos_box(N,seed+run,box,lsph);
  else
    err = gen_unif_rdn_pos_box(N,seed,box,lsph);

  if(err)
    fprintf(stderr,"error in gen_unif_rdn_pos\n");

  // ------------------------------------------------------ //
  // The five stages below are individually timed; the order is significant:
  // hash -> sort -> reorder -> hashtable setup -> density.

  double t0,t1,t2,t3,t4,t5;

  t0 = omp_get_wtime();

  err = compute_hash_MC3D(N,lsph,box);                   // Compute Morton Z 3D hash based on the
  if(err)                                                // cell index for each of the X, Y and Z
    fprintf(stderr,"error in compute_hash_MC3D\n");      // directions, in which a given particle reside

  t1 = omp_get_wtime();

  // Each hash entry is a (hash, index) pair, hence the 2*sizeof(int64_t) width:
  // sorting the pairs yields the permutation needed to re-shuffle the SoA arrays.
  qsort(lsph->hash,N,2*sizeof(int64_t),compare_int64_t);

  t2 = omp_get_wtime();

  err = reorder_lsph_SoA(N,lsph,swap_arr);               // Reorder all arrays according to the sorted hash,
  if(err)                                                // As to have a quick way to retrieve a cell
    fprintf(stderr,"error in reorder_lsph_SoA\n");       // given its hash.

  t3 = omp_get_wtime();

  err = setup_interval_hashtables(N,lsph,box);           // Annotate the beginning and end of each cell
  if(err)                                                // on the cell linked list method for fast
    fprintf(stderr,"error in setup_interval_hashtables\n"); // neighbor search

  t4 = omp_get_wtime();

  err = compute_density_3d_load_ballanced(N,h,lsph,box); // Compute the density of the particles based
  if(err)                                                // on the cell linked list method for fast
    fprintf(stderr,"error in compute_density\n");        // neighbor search

  // ------------------------------------------------------ //

  t5 = omp_get_wtime();

  times[COMPUTE_BLOCKS*run+0] = t1-t0;                   // Time for compute morton Z 3d hash
  times[COMPUTE_BLOCKS*run+1] = t2-t1;                   // Time for sorting the particles' hashes
  times[COMPUTE_BLOCKS*run+2] = t3-t2;                   // Time for reordering all other arrays accordingly
  times[COMPUTE_BLOCKS*run+3] = t4-t3;                   // Time for setting up the interval hash tables
  times[COMPUTE_BLOCKS*run+4] = t5-t4;                   // Time for computing the SPH particle densities

  return 0;
}

/*
 * Function compute_density_3d_load_ballanced:
 *    Computes the SPH density from the particles using cell linked list with
 *    vectorization at the compute_density_3d_chunk level, but the parallelization
 *    done at the level of the outer-most loop of the compute_density_3d_cll_outerOmp
 *    function, not at the chunk level.
 *
 *    The parallelization is done at the level of cell pair instead of cells, with
 *    the indexes for the cell pairs pre-computed before parallelization.
* * Arguments: * N <int> : Number of SPH particles to be used in the run * h <double> : Smoothing Length for the Smoothing Kernel w_bspline * lsph <SPHparticle> : Array (pointer) of SPH particles to be updated * Returns: * 0 : error code returned * lsph <SPHparticle> : SPH particle array is updated in the rho field by reference */ int compute_density_3d_load_ballanced(int N, double h, SPHparticle *lsph, linkedListBox *box){ int64_t *node_begin,*node_end,*nb_begin,*nb_end; // Define the arrays for cell boundaries int64_t max_cell_pair_count = 0; // and the number of cell pairs max_cell_pair_count = count_box_pairs(box); // compute the number of cell pairs node_begin = (int64_t*)malloc(max_cell_pair_count*sizeof(int64_t)); // allocate space for node_begin node_end = (int64_t*)malloc(max_cell_pair_count*sizeof(int64_t)); // allocate space for node_end nb_begin = (int64_t*)malloc(max_cell_pair_count*sizeof(int64_t)); // allocate space for nb_begin nb_end = (int64_t*)malloc(max_cell_pair_count*sizeof(int64_t)); // allocate space for nb_end setup_box_pairs(box,node_begin,node_end,nb_begin,nb_end); // set the values for cell pairs memset(lsph->rho,(int)0,N*sizeof(double)); // Pre-initialize the density to zero #pragma omp parallel for // execute in parallel for(size_t i=0;i<max_cell_pair_count;i+=1){ // iterate over cell pairs' array compute_density_3d_chunk_noomp(node_begin[i],node_end[i], // compute the cell pair contribution nb_begin[i],nb_end[i], h,lsph->x,lsph->y,lsph->z, lsph->nu,lsph->rho); } free(node_begin); free(node_end); free(nb_begin); free(nb_end); return 0; } /* * Function compute_density_3d_noomp: * Computes the SPH density contribution to the node_ cell from the nb_ cell. * Vectorization in the inner-most loop, but no parallelization. 
* * Arguments: * node_begin <int64_t> : Begin index for the cell the contribution is made to * node_end <int64_t> : End index for the cell the contribution is made to * nb_begin <int64_t> : Begin index for the cell the contribution is made from * nb_end <int64_t> : End index for the cell the contribution is made from * h <double> : Smoothing Length for the Smoothing Kernel w_bspline * x <double*> : Array of particles' X positions * y <double*> : Array of particles' Y positions * z <double*> : Array of particles' Z positions * nu <double*> : Array of particles' density weights (i.e. masses) * Returns: * 0 : error code returned * rho <double*> : Array of particles' densities */ int compute_density_3d_chunk_noomp(int64_t node_begin, int64_t node_end, int64_t nb_begin, int64_t nb_end,double h, double* restrict x, double* restrict y, double* restrict z, double* restrict nu, double* restrict rho){ const double inv_h = 1./h; const double kernel_constant = w_bspline_3d_constant(h); for(int64_t ii=node_begin;ii<node_end;ii+=1){ // Iterate over the ii index of the chunk double xii = x[ii]; // Load the X component of the ii particle position double yii = y[ii]; // Load the Y component of the ii particle position double zii = z[ii]; // Load the Z component of the ii particle position double rhoii = 0.0; // Initialize the chunk contribution to density #pragma omp simd // Hint at the compiler to vectorize for(int64_t jj=nb_begin;jj<nb_end;jj+=1){ // Iterate over the each other particle in jj loop double q = 0.; // Initialize the distance double xij = xii-x[jj]; // Load and subtract jj particle's X position component double yij = yii-y[jj]; // Load and subtract jj particle's Y position component double zij = zii-z[jj]; // Load and subtract jj particle's Z position component q += xij*xij; // Add the jj contribution to the ii distance in X q += yij*yij; // Add the jj contribution to the ii distance in X q += zij*zij; // Add the jj contribution to the ii distance in X q = 
sqrt(q)*inv_h; // Sqrt to compute the distance rhoii += nu[jj]*w_bspline_3d_simd(q); // Add up the contribution from the jj particle } // to the intermediary density and then rho[ii] += rhoii*kernel_constant; // add the intermediary density to the full density } return 0; } /* * Function w_bspline_3d_constant: * Returns the 3d normalization constant for the cubic b-spline SPH smoothing kernel * * Arguments: * h <double> : Smoothing Length for the Smoothing Kernel w_bspline * Returns: * 3d bspline normalization density <double> */ double w_bspline_3d_constant(double h){ return 3./(2.*M_PI*h*h*h); // 3d normalization value for the b-spline kernel } /* * Function w_bspline_3d_simd: * Returns the un-normalized value of the cubic b-spline SPH smoothing kernel * * Arguments: * q <double> : Distance between particles normalized by the smoothing length h * Returns: * wq <double> : Unnormalized value of the kernel * * Observation: * Why not else if(q<2.)? * Because if you use "else if", the compiler refuses to vectorize, * This results in a large slowdown, as of 2.5x slower for example_04 */ #pragma omp declare simd double w_bspline_3d_simd(double q){ double wq=0; double wq1 = (0.6666666666666666 - q*q + 0.5*q*q*q); // The first polynomial of the spline double wq2 = 0.16666666666666666*(2.-q)*(2.-q)*(2.-q); // The second polynomial of the spline if(q<2.) // If the distance is below 2 wq = wq2; // Use the 2nd polynomial for the spline if(q<1.) // If the distance is below 1 wq = wq1; // Use the 1st polynomial for the spline return wq; // return which ever value corresponds to the distance }
omp_for_lastprivate.c
<ompts:test> <ompts:testdescription>Test which checks the omp for lastprivate clause by counting up a variable in a parallelized loop. Each thread saves the next summand in a lastprivate variable i0. At the end i0 is compared to the value of the expected last summand.</ompts:testdescription> <ompts:ompversion>2.0</ompts:ompversion> <ompts:directive>omp for lastprivate</ompts:directive> <ompts:dependences>omp critical,omp parallel firstprivate,omp schedule</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <math.h> #include "omp_testsuite.h" int sum0; #pragma omp threadprivate(sum0) int <ompts:testcode:functionname>omp_for_lastprivate</ompts:testcode:functionname> (FILE * logFile) { int sum = 0; int known_sum; <ompts:orphan:vars> int i0; </ompts:orphan:vars> i0 = -1; #pragma omp parallel { sum0 = 0; { /* Begin of orphaned block */ <ompts:orphan> int i; #pragma omp for schedule(static,7) <ompts:check>lastprivate(i0)</ompts:check> for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum0 + i; i0 = i; } /* end of for */ </ompts:orphan> } /* end of orphaned block */ #pragma omp critical { sum = sum + sum0; } /* end of critical */ } /* end of parallel */ known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; fprintf(logFile," known_sum = %d , sum = %d \n",known_sum,sum); fprintf(logFile," LOOPCOUNT = %d , i0 = %d \n",LOOPCOUNT,i0); return ((known_sum == sum) && (i0 == LOOPCOUNT) ); } </ompts:testcode> </ompts:test>
add_vect.c
#include <stdio.h>

/* Number of elements in each vector. */
#define N 100000

/*
 * Demonstrates worksharing with OpenMP: computes a = b + c and then
 * d = a + b element-wise, and prints two sample elements so the result
 * is observable (and cannot be optimized away entirely).
 *
 * Returns 0 on success.
 */
int main(int argc, char **argv)
{
  (void)argc;  /* unused */
  (void)argv;  /* unused */

  /* 'static' keeps the ~1.6 MB of arrays out of automatic (stack) storage,
   * avoiding stack overflow on platforms with small default stack sizes. */
  static int a[N], b[N], c[N], d[N];

  for (int i = 0; i < N; i++) {
    b[i] = 2;
    c[i] = 5;
  }

  #pragma omp parallel
  {
    /* The implicit barrier at the end of the first worksharing loop
     * guarantees 'a' is fully written before 'd' is computed from it. */
    #pragma omp for
    for (int i = 0; i < N; i++)
      a[i] = b[i] + c[i];

    #pragma omp for
    for (int i = 0; i < N; i++)
      d[i] = a[i] + b[i];
  }

  printf("a[%d] = %d\n", 1, a[1]);
  printf("d[%d] = %d\n", 5, d[5]);

  return 0;
}
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 convolution, stride 1, scalar reference implementation (no intrinsics
// despite the _sse suffix).  Each output channel accumulates the contribution
// of every input channel; output rows are processed two at a time so the
// shared input rows r1/r2 are loaded once for both.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;       // input width
    int h = bottom_blob.h;       // input height (unused below; kept for symmetry)
    int inch = bottom_blob.c;    // number of input channels

    int outw = top_blob.w;       // output width
    int outh = top_blob.h;       // output height
    int outch = top_blob.c;      // number of output channels

    const float* kernel = _kernel;  // weights, laid out as [outch][inch][9]
    const float* bias = _bias;      // optional per-output-channel bias (may be null)

    #pragma omp parallel for     // output channels are independent of each other
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);         // start from the bias; the loops below accumulate into it

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;                 // cursor into output row i
            float* outptr2 = outptr + outw;      // cursor into output row i+1

            const float* img0 = bottom_blob.channel(q);

            // 3x3 weight block for this (output, input) channel pair
            const float* kernel0 = kernel + p*inch*9 + q*9;

            // four consecutive input rows feed two output rows
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

            const float* k0 = kernel0;           // kernel row 0
            const float* k1 = kernel0 + 3;       // kernel row 1
            const float* k2 = kernel0 + 6;       // kernel row 2

            int i = 0;

            // main loop: two output rows per iteration
            for (; i+1 < outh; i+=2)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;   // accumulator for output row i
                    float sum2 = 0;  // accumulator for output row i+1

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    // row i+1 reuses r1/r2 and additionally reads r3
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    // += accumulates across input channels (bias was pre-filled)
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // skip the 2-pixel right border of the input window and
                // advance one extra input row (two output rows consumed)
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;      // jump past the row that outptr2 just wrote
                outptr2 += outw;
            }

            // tail: one remaining output row when outh is odd
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;             // skip the right border only (single row step)
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
GB_unaryop__minv_int8_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int8_bool // op(A') function: GB_tran__minv_int8_bool // C type: int8_t // A type: bool // cast: int8_t cij = (int8_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ bool #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int8_bool ( int8_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, 
p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int8_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
displacement_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement (for penalty contact) * @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace 
SparseSpaceType; /// The table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor. * @param DispRatioTolerance Relative tolerance for displacement error * @param DispAbsTolerance Absolute tolerance for displacement error * @param RotRatioTolerance Relative tolerance for rotation error * @param RotAbsTolerance Absolute tolerance for rotation error * @param pTable The pointer to the output table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType RotRatioTolerance, const TDataType RotAbsTolerance, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); // The displacement solution mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The rotation solution mRotRatioTolerance = RotRatioTolerance; mRotAbsTolerance = RotAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } // Copy constructor. 
DisplacementContactCriteria( DisplacementContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mRotRatioTolerance(rOther.mRotRatioTolerance) ,mRotAbsTolerance(rOther.mRotAbsTolerance) { } /// Destructor. ~DisplacementContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something // Initialize TDataType disp_solution_norm = 0.0, disp_increase_norm = 0.0; IndexType disp_dof_num(0); TDataType rot_solution_norm = 0.0, rot_increase_norm = 0.0; IndexType rot_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType dof_value = 0.0, dof_incr = 0.0; // Auxiliar displacement DoF check const std::function<bool(const VariableData&)> check_without_rot = [](const VariableData& rCurrVar) -> bool {return true;}; const std::function<bool(const VariableData&)> check_with_rot = [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));}; const auto* p_check_disp = (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? 
&check_with_rot : &check_without_rot; // Loop over Dofs #pragma omp parallel for reduction(+:disp_solution_norm,disp_increase_norm,disp_dof_num,rot_solution_norm,rot_increase_norm,rot_dof_num,dof_id,dof_value,dof_incr) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; if (it_dof->IsFree()) { dof_id = it_dof->EquationId(); dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if ((*p_check_disp)(r_curr_var)) { disp_solution_norm += std::pow(dof_value, 2); disp_increase_norm += std::pow(dof_incr, 2); ++disp_dof_num; } else { KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl; rot_solution_norm += std::pow(dof_value, 2); rot_increase_norm += std::pow(dof_incr, 2); ++rot_dof_num; } } } if(disp_increase_norm == 0.0) disp_increase_norm = 1.0; if(disp_solution_norm == 0.0) disp_solution_norm = 1.0; if(rot_increase_norm == 0.0) rot_increase_norm = 1.0; if(rot_solution_norm == 0.0) rot_solution_norm = 1.0; const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm); const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num); const TDataType rot_ratio = std::sqrt(rot_increase_norm/rot_solution_norm); const TDataType rot_abs = std::sqrt(rot_increase_norm)/ static_cast<TDataType>(rot_dof_num); // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); if (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { Table << disp_ratio << 
mDispRatioTolerance << disp_abs << mDispAbsTolerance << rot_ratio << mRotRatioTolerance << rot_abs << mRotAbsTolerance; } else { Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance; } } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl; } } else { KRATOS_INFO("DisplacementContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementContactCriteria") << "\tROTATION: RATIO = " << rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl; } } } } // We check if converged const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance); const bool rot_converged = mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED) ? 
(rot_ratio <= mRotRatioTolerance || rot_abs <= mRotAbsTolerance) : true; if (disp_converged && rot_converged) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& table = p_table->GetTable(); if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT)) table << BOLDFONT(FGRN(" Achieved")); else table << "Achieved"; } else { if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementContactCriteria") << "\tDoF convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& table = p_table->GetTable(); if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT)) table << BOLDFONT(FRED(" Not achieved")); else table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementContactCriteria") << "\tDoF convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. 
(unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.IsNot(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table.AddColumn("RT RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, true); } // Check rotation dof mOptions.Set(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart)); } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "displacement_contact_criteria", "ensure_contact" : false, "print_convergence_criterion" : false, "displacement_relative_tolerance" : 1.0e-4, "displacement_absolute_tolerance" : 1.0e-9, "rotation_relative_tolerance" : 1.0e-4, "rotation_absolute_tolerance" : 1.0e-9 })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "displacement_contact_criteria"; } ///@} ///@name Operations ///@{ ///@} ///@name 
Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method assigns settings to member variables
     * @param ThisParameters Parameters that are assigned to the member variables
     */
    void AssignSettings(const Parameters ThisParameters) override
    {
        BaseType::AssignSettings(ThisParameters);

        // The displacement solution tolerances
        mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();

        // The rotation solution tolerances
        mRotRatioTolerance = ThisParameters["rotation_relative_tolerance"].GetDouble();
        mRotAbsTolerance = ThisParameters["rotation_absolute_tolerance"].GetDouble();

        // Set local flags (table and rotation flags are resolved later, in Initialize)
        mOptions.Set(DisplacementContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    Flags mOptions;                /// Local flags

    TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
    TDataType mDispAbsTolerance;   /// The absolute value threshold for the norm of the displacement

    TDataType mRotRatioTolerance;  /// The ratio threshold for the norm of the rotation
    TDataType mRotAbsTolerance;    /// The absolute value threshold for the norm of the rotation

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}

    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
///@{ ///@} }; // Kratos DisplacementContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3)); } #endif /* KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H */
5233.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp target teams distribute for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp target teams distribute for (i = 0; i < _PB_N; i++) { #pragma omp parallel for simd num_threads(8) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp parallel for simd num_threads(8) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
test_scatter.c
/* test_scatter: align all pairs of input (or randomly generated) protein
 * sequences with a user-selected parasail alignment function, in parallel
 * with OpenMP, and emit per-pair CSV statistics (score, residue
 * composition, timing, CUPS).
 *
 * Fixes relative to the previous revision:
 *  - <ctype.h> was missing, so isprint() was implicitly declared (UB in
 *    C99, a hard error in C23).
 *  - `shortest` is an unsigned long min-tracker; it must start at
 *    ULONG_MAX, not INT_MAX.
 *  - sequences/sizes are now freed before exit.
 */
#include "config.h"

#include <ctype.h>     /* isprint() */
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "kseq.h"
KSEQ_INIT(int, read)

#if HAVE_SSE2
#include "ssw.h"
#endif

#include "parasail.h"
#include "parasail/function_lookup.h"
//#include "timer.h"
#include "timer_real.h"

/* This table is used to transform amino acid letters into numbers.
 * Unknown characters map to 23 ("NA"); 'A'..'Z' (either case) map to
 * the 0..22 substitution-matrix alphabet. */
static const int8_t table[128] = {
    23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
    23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
    23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
    23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
    23,  0, 20,  4,  3,  6, 13,  7,  8,  9, 23, 11, 10, 12,  2, 23,
    14,  5,  1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23,
    23,  0, 20,  4,  3,  6, 13,  7,  8,  9, 23, 11, 10, 12,  2, 23,
    14,  5,  1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23
};

/* Read all FASTA/FASTQ records from `filename`.
 * On return *strings_ owns an array of *count_ strdup'd sequences and
 * *sizes_ their lengths; caller frees each string plus both arrays.
 * Exits on I/O or allocation failure. */
static inline void parse_sequences(
        const char *filename, char ***strings_,
        size_t **sizes_, size_t *count_)
{
    FILE *fp;
    kseq_t *seq = NULL;
    int l = 0;
    char **strings = NULL;
    size_t *sizes = NULL;
    size_t count = 0;
    size_t memory = 1000;   /* current capacity; doubled when full */

    fp = fopen(filename, "r");
    if (fp == NULL) {
        perror("fopen");
        exit(1);
    }
    strings = malloc(sizeof(char*) * memory);
    sizes = malloc(sizeof(size_t) * memory);
    seq = kseq_init(fileno(fp));
    while ((l = kseq_read(seq)) >= 0) {
        strings[count] = strdup(seq->seq.s);
        if (NULL == strings[count]) {
            perror("strdup");
            exit(1);
        }
        sizes[count] = seq->seq.l;
        ++count;
        if (count >= memory) {
            char **new_strings = NULL;
            size_t *new_sizes = NULL;
            memory *= 2;
            /* realloc via temporaries so the originals survive failure */
            new_strings = realloc(strings, sizeof(char*) * memory);
            if (NULL == new_strings) {
                perror("realloc");
                exit(1);
            }
            strings = new_strings;
            new_sizes = realloc(sizes, sizeof(size_t) * memory);
            if (NULL == new_sizes) {
                perror("realloc");
                exit(1);
            }
            sizes = new_sizes;
        }
    }
    kseq_destroy(seq);
    fclose(fp);

    *strings_ = strings;
    *sizes_ = sizes;
    *count_ = count;
}

/* Return a freshly malloc'd random string of uppercase letters.
 * NOTE: the produced string has size-1 characters plus the NUL, matching
 * the historical behavior of this driver. Returns NULL when size == 0. */
static inline char* rand_string(size_t size)
{
    char *str = NULL;
    const char charset[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    if (size) {
        size_t n;
        --size;
        str = malloc(size + 1);
        for (n = 0; n < size; n++) {
            int key = rand() % (int) (sizeof charset - 1);
            str[n] = charset[key];
        }
        str[size] = '\0';
    }
    return str;
}

/* n choose k without intermediate overflow for moderate n.
 * From http://blog.plover.com/math/choose.html */
static inline unsigned long binomial_coefficient(unsigned long n, unsigned long k)
{
    unsigned long r = 1;
    unsigned long d;
    if (k > n) {
        return 0;
    }
    for (d = 1; d <= k; d++) {
        r *= n--;
        r /= d;
    }
    return r;
}

/* Map a linear index `pos` in [0, C(n,2)) to the pos-th 2-combination
 * (*a, *b) with *a < *b, without enumerating earlier pairs. */
static inline void k_combination2(unsigned long pos, unsigned long *a, unsigned long *b)
{
    double s;
    double i = floor(sqrt(2.0 * pos)) - 1.0;
    if (i <= 1.0) {
        i = 1.0;
    }
    s = i * (i - 1.0) / 2.0;
    while (pos - s >= i) {
        s += i;
        i += 1;
    }
    *a = (unsigned long)(pos - s);
    *b = (unsigned long)(i);
}

int main(int argc, char **argv)
{
    unsigned long shortest = ULONG_MAX;  /* was INT_MAX: wrong limit for an unsigned long min */
    unsigned long longest = 0;
    double timer_clock = 0.0;
    unsigned long i = 0;
    size_t seq_count = 10;
    size_t limit = 0;
    char **sequences = NULL;
    size_t *sizes = NULL;
    char *endptr = NULL;
    char *funcname = NULL;
    parasail_function_t *function = NULL;
    int lanes = 1;
    char *filename = NULL;
    int c = 0;
    const char *matrixname = "blosum62";
    const parasail_matrix_t *matrix = NULL;
    int gap_open = 10;
    int gap_extend = 1;
    int saturated = 0;

    /* Command line:
     *  -a func  alignment function name (required)
     *  -b name  substitution matrix (default blosum62)
     *  -f file  FASTA input (default: random sequences)
     *  -n N     number of random sequences
     *  -o N     gap open penalty, -e N gap extend penalty */
    while ((c = getopt(argc, argv, "a:b:f:n:o:e:")) != -1) {
        switch (c) {
            case 'a':
                funcname = optarg;
                break;
            case 'b':
                matrixname = optarg;
                break;
            case 'f':
                filename = optarg;
                break;
            case 'n':
                errno = 0;
                seq_count = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol");
                    exit(1);
                }
                break;
            case 'o':
                errno = 0;
                gap_open = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol");
                    exit(1);
                }
                break;
            case 'e':
                errno = 0;
                gap_extend = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol");
                    exit(1);
                }
                break;
            case '?':
                if (optopt == 'f' || optopt == 'n') {
                    fprintf(stderr,
                            "Option -%c requires an argument.\n",
                            optopt);
                }
                else if (isprint(optopt)) {
                    fprintf(stderr, "Unknown option `-%c'.\n", optopt);
                }
                else {
                    fprintf(stderr,
                            "Unknown option character `\\x%x'.\n",
                            optopt);
                }
                exit(1);
            default:
                fprintf(stderr, "default case in getopt\n");
                exit(1);
        }
    }

    /* select the function */
    if (funcname) {
        int index = 0;
        parasail_function_info_t f;
        f = functions[index++];
        while (f.pointer) {
            if (0 == strcmp(funcname, f.name)) {
                function = f.pointer;
                lanes = f.lanes;
                break;
            }
            f = functions[index++];
        }
        if (NULL == function) {
            fprintf(stderr, "Specified function not found.\n");
            exit(1);
        }
    }
    else {
        fprintf(stderr, "No alignment function specified.\n");
        exit(1);
    }

    /* select the substitution matrix */
    if (matrixname) {
        matrix = parasail_matrix_lookup(matrixname);
        if (NULL == matrix) {
            fprintf(stderr, "Specified substitution matrix not found.\n");
            exit(1);
        }
    }

    if (filename) {
        parse_sequences(filename, &sequences, &sizes, &seq_count);
    }
    else {
        /* generate 'seq_count' number of random strings */
        sequences = (char**)malloc(sizeof(char*)*seq_count);
        sizes = (size_t*)malloc(sizeof(size_t)*seq_count);
        for (i = 0; i < seq_count; ++i) {
            sizes[i] = (rand()%32767)+10;
            shortest = sizes[i] < shortest ? sizes[i] : shortest;
            longest = sizes[i] > longest ? sizes[i] : longest;
            sequences[i] = rand_string(sizes[i]);
        }
    }

    limit = binomial_coefficient(seq_count, 2);

    /* CSV header */
    printf("size_A,segLen,size_B,score,matches,similar,length,corrections,cells,time,");
    printf("A_a,R_a,N_a,D_a,C_a,Q_a,E_a,G_a,H_a,I_a,L_a,K_a,M_a,F_a,P_a,S_a,T_a,W_a,Y_a,V_a,B_a,Z_a,X_a,NA_a,");
    printf("A_b,R_b,N_b,D_b,C_b,Q_b,E_b,G_b,H_b,I_b,L_b,K_b,M_b,F_b,P_b,S_b,T_b,W_b,Y_b,V_b,B_b,Z_b,X_b,NA_b,");
    printf("CUPS\n");

    timer_clock = timer_real();
    #pragma omp parallel
    {
        unsigned long a = 0;
        unsigned long b = 1;
        double timer_local = 0.0;
        unsigned long a_counts[24];
        unsigned long b_counts[24];
        unsigned long j;
        /* each iteration aligns one distinct pair (a, b) */
        #pragma omp for schedule(dynamic)
        for (i = 0; i < limit; ++i) {
            parasail_result_t *result = NULL;
            k_combination2(i, &a, &b);
            timer_local = timer_real();
            result = function(sequences[a], sizes[a],
                              sequences[b], sizes[b],
                              gap_open, gap_extend, matrix);
            timer_local = timer_real() - timer_local;
            /* residue histograms for both sequences of the pair */
            for (j = 0; j < 24; ++j) {
                a_counts[j] = 0;
                b_counts[j] = 0;
            }
            for (j = 0; j < sizes[a]; ++j) {
                a_counts[table[(unsigned)sequences[a][j]]] += 1;
            }
            for (j = 0; j < sizes[b]; ++j) {
                b_counts[table[(unsigned)sequences[b][j]]] += 1;
            }
            /* serialize output: one CSV row per pair */
            #pragma omp critical
            {
                printf("%lu,%lu,%lu,%d,%d,%d,%d,%lu,%f",
                        (unsigned long)sizes[a],
                        (unsigned long)(sizes[a]+lanes-1)/lanes,
                        (unsigned long)sizes[b],
                        result->score,
                        result->matches,
                        result->similar,
                        result->length,
                        (unsigned long)(sizes[a]*sizes[b]),
                        timer_local);
                for (j = 0; j < 24; ++j) {
                    //printf(",%lu", a_counts[j]);
                    printf(",%f", (double)(a_counts[j])/sizes[a]);
                }
                for (j = 0; j < 24; ++j) {
                    //printf(",%lu", b_counts[j]);
                    printf(",%f", (double)(b_counts[j])/sizes[b]);
                }
                printf(",%f\n", sizes[a]*sizes[b]/timer_local);
            }
            #pragma omp atomic
            saturated += result->saturated;
            parasail_result_free(result);
        }
    }
    timer_clock = timer_real() - timer_clock;

    /* Be clean: release sequence storage. */
    for (i = 0; i < seq_count; ++i) {
        free(sequences[i]);
    }
    free(sequences);
    free(sizes);

    return 0;
}
mixed_tentusscher_myo_epi_2004_S3_20.c
// Scenario 3 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rc)
//
// Implements the ten Tusscher et al. 2004 human ventricular cell model
// with a per-cell mask (`extra_data`) selecting myocardium (0) or
// epicardium (non-zero) parameters. 17 state variables per cell.
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S3_20.h"

// Report the model's resting potential and equation count to the solver.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Set the initial state vector for cell `sv_id`, choosing the myocardium
// or epicardium steady-state depending on the mask in `extra_data`.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5416381029710,0.00129742313431501,0.779058087874356,0.778951275699783,0.000175400267410166,0.484813241067308,0.00294587325391635,0.999998339341719,1.94207059338896e-08,1.89778840917076e-05,0.999772653033000,1.00721993170388,0.999996907554520,4.22421886024410e-05,0.744054308738152,10.2766651112694,139.172056496758};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance every requested cell by `num_steps` explicit-Euler steps of
// size `dt`, dispatching on the myo/epi mask. Parallel over cells.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    // NOTE(review): the mask is read as mapping[i] while the state is
    // read via sv_id = cells_to_solve[i]; when cells_to_solve is
    // non-NULL these indices may differ -- confirm this is intended.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a myocardium cell: evaluate the RHS (which returns
// the already-updated states in rDY) and copy it back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    // rDY holds the new state values (not derivatives) -- see RHS_cpu_myo.
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 right-hand side, myocardium parameter set.
// Computes all ionic currents, updates intracellular concentrations in
// place, advances the gating variables with the Rush-Larsen scheme, and
// stores the NEW state values (forward Euler for V) in rDY_.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];   // membrane voltage (mV)
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst/reversal potentials and rectification factors
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium, buffered via calsequestrin (quadratic solved in closed form)
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium, buffered analogously
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only decrease while depolarized (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler); rDY_ holds NEW values, not derivatives
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One time step for an epicardium cell; same scheme as the myo variant.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    // rDY holds the new state values (not derivatives) -- see RHS_cpu_epi.
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 right-hand side, epicardium parameter set. Identical
// structure to RHS_cpu_myo, but conductances are overridden below by a
// fitted `parameters` array (Scenario 3 calibration) and the SR release/
// leak use the fitted arel/crel/Vleak values.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];   // membrane voltage (mV)
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted conductances for this scenario; they overwrite the default
    // values assigned above.
    real parameters []={14.3267748982652,0.000369396880965334,0.000138718412791722,0.000274765995516752,0.222557047894483,0.131225943240472,0.194018855199521,4.70098964246625,0.0175173968211143,1.45392118187522,1093.48753540057,0.000621762218099826,0.341961934777053,0.0120870127836469,0.00451100911423527,3.31418392030779e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst/reversal potentials and rectification factors
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // fitted release (arel/crel) and leak (Vleak) replace the myo constants
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium, buffered via calsequestrin (quadratic solved in closed form)
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium, buffered analogously
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only decrease while depolarized (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler); rDY_ holds NEW values, not derivatives
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
csr-parser.c
#include <stdio.h> #include <stdlib.h> #include <regex.h> #include <assert.h> #include <sys/param.h> #include "csr-parser.h" #define READ_CHAR_BUFFER_SIZE 102400 static size_t parse_number_of_nodes(const char *nodes_mtx, regex_t *regex_comment) { FILE *fp; char buff[READ_CHAR_BUFFER_SIZE]; int reti; size_t num_nodes_1, num_nodes_2; char *p_end; num_nodes_1 = 0; fp = fopen(nodes_mtx, "r"); if(fp == NULL) { fprintf(stderr, "Unable to open file: '%s'", nodes_mtx); exit(EXIT_FAILURE); } while ( fgets(buff, READ_CHAR_BUFFER_SIZE, fp) != NULL ) { reti = regexec(regex_comment, buff, 0, NULL, 0); if(reti == REG_NOMATCH) { num_nodes_1 = strtoul(buff, &p_end, 10); num_nodes_2 = strtoul(p_end, &p_end, 10); assert(num_nodes_1 == num_nodes_2); assert(num_nodes_1 >= 0); break; } } fclose(fp); return num_nodes_1; } static size_t parse_number_of_node_states(const char *nodes_mtx, regex_t *regex_comment) { FILE *fp; char buff[READ_CHAR_BUFFER_SIZE]; int reti; size_t node_1, node_2, num_beliefs; char *p_end, *prev; char no_skip = 0; num_beliefs = 0; fp = fopen(nodes_mtx, "r"); if(fp == NULL) { fprintf(stderr, "Unable to open file: '%s'", nodes_mtx); exit(EXIT_FAILURE); } while ( fgets(buff, READ_CHAR_BUFFER_SIZE, fp) != NULL ) { reti = regexec(regex_comment, buff, 0, NULL, 0); if(reti == REG_NOMATCH && no_skip == 0) { no_skip = 1; } else if(reti == REG_NOMATCH && no_skip > 0) { node_1 = strtoul(buff, &p_end, 10); node_2 = strtoul(p_end, &p_end, 10); assert(node_1 == node_2); prev = p_end; strtof(p_end, &p_end); while(p_end != prev) { num_beliefs += 1; prev = p_end; strtof(p_end, &p_end); } break; } } fclose(fp); assert(num_beliefs <= MAX_STATES); return (size_t)num_beliefs; } static size_t parse_number_of_edges(const char *edges_mtx, regex_t *regex_comment) { FILE *fp; char buff[READ_CHAR_BUFFER_SIZE]; int reti; size_t num_cols, num_rows, num_non_zeroes; char *p_end; fp = fopen(edges_mtx, "r"); if(fp == NULL) { fprintf(stderr, "Unable to open file: '%s'", edges_mtx); 
exit(EXIT_FAILURE); } num_non_zeroes = 0; while ( fgets(buff, READ_CHAR_BUFFER_SIZE, fp) != NULL ) { reti = regexec(regex_comment, buff, 0, NULL, 0); if(reti == REG_NOMATCH) { num_cols = strtoul(buff, &p_end, 10); num_rows = strtoul(p_end, &p_end, 10); num_non_zeroes = strtoul(p_end, &p_end, 10); assert(num_rows >= 0); assert(num_cols >= 0); assert(num_non_zeroes >= 0); break; } } fclose(fp); return num_non_zeroes; } static void add_nodes(Graph_t graph, const char *nodes_mtx, regex_t *comment_regex, size_t num_states) { FILE *fp; char buff[READ_CHAR_BUFFER_SIZE]; char name[READ_CHAR_BUFFER_SIZE]; char *p_end, *prev; int reti; char found_header; size_t node_id_1, node_id_2; float prob; struct belief curr_belief; size_t curr_belief_index; found_header = 0; // set up belief fp = fopen(nodes_mtx, "r"); if(fp == NULL) { fprintf(stderr, "Unable to open file: '%s'", nodes_mtx); exit(EXIT_FAILURE); } while ( fgets(buff, READ_CHAR_BUFFER_SIZE, fp) != NULL ) { curr_belief_index = 0; reti = regexec(comment_regex, buff, 0, NULL, 0); if(reti == REG_NOMATCH) { // skip over header line if(found_header == 0) { found_header = 1; } else { node_id_1 = strtoul(buff, &p_end, 10); node_id_2 = strtoul(p_end, &p_end, 10); assert(node_id_1 == node_id_2); assert(node_id_1 >= 1); sprintf(name, "Node %ld", node_id_1); prev = p_end; prob = strtof(p_end, &p_end); while(p_end != prev) { assert(prob >= 0.0f); assert(prob <= 1.0f); curr_belief.data[curr_belief_index] = prob; curr_belief_index++; prev = p_end; prob = strtof(p_end, &p_end); } assert(num_states == curr_belief_index); // check if observed node if(curr_belief.data[0] < DEFAULT_STATE) { graph_add_and_set_node_state(graph, num_states, name, &curr_belief); } else { graph_add_node(graph, num_states, name); } } } } fclose(fp); } static void add_edges(Graph_t graph, const char *edges_mtx, regex_t *comment_regex, size_t num_states) { FILE *fp; char buff[READ_CHAR_BUFFER_SIZE]; int reti; char found_header; char *p_end, *prev; size_t src_id, 
dest_id; size_t src_index, dest_index, x, y; float prob; found_header = 0; fp = fopen(edges_mtx, "r"); if(fp == NULL) { fprintf(stderr, "Unable to open file: '%s'", edges_mtx); exit(EXIT_FAILURE); } while ( fgets(buff, READ_CHAR_BUFFER_SIZE, fp) != NULL ) { x = 0; y = 0; reti = regexec(comment_regex, buff, 0, NULL, 0); if(reti == REG_NOMATCH) { if(found_header == 0) { found_header = 1; } else { src_id = strtoul(buff, &p_end, 10); dest_id = strtoul(p_end, &p_end, 10); assert(src_id > 0); assert(dest_id > 0); src_index = (size_t)(src_id - 1); dest_index = (size_t)(dest_id - 1); graph_add_edge(graph, src_index, dest_index, num_states, num_states); } } } fclose(fp); } static void add_edge_to_diameter_calc(const char * edges_mtx, size_t * dist, size_t num_nodes, regex_t *comment_regex) { FILE *fp; char buff[READ_CHAR_BUFFER_SIZE]; int reti; char found_header; char *p_end, *prev; size_t src_id, dest_id; size_t src_index, dest_index; found_header = 0; fp = fopen(edges_mtx, "r"); if (fp == NULL) { fprintf(stderr, "Unable to open file: '%s'", edges_mtx); exit(EXIT_FAILURE); } while (fgets(buff, READ_CHAR_BUFFER_SIZE, fp) != NULL) { reti = regexec(comment_regex, buff, 0, NULL, 0); if (reti == REG_NOMATCH) { if (found_header == 0) { found_header = 1; } else { src_id = strtoul(buff, &p_end, 10); dest_id = strtoul(p_end, &p_end, 10); assert(src_id > 0); assert(dest_id > 0); src_index = (size_t) (src_id - 1); dest_index = (size_t) (dest_id - 1); dist[src_index * num_nodes + dest_index] = 1; } } } fclose(fp); } Graph_t build_graph_from_mtx(const char *edges_mtx, const char *nodes_mtx, const struct joint_probability * edge_joint_probability, size_t dim_x, size_t dim_y) { regex_t regex_comment; int reti; size_t num_nodes, num_edges; size_t num_node_states; Graph_t graph; // compile comment regex reti = regcomp(&regex_comment, "^[[:space:]]*%", 0); if (reti) { perror("Could not compile regex\n"); exit(1); } num_nodes = parse_number_of_nodes(nodes_mtx, &regex_comment); 
num_node_states = parse_number_of_node_states(nodes_mtx, &regex_comment); assert(num_node_states > 0); assert(num_node_states <= MAX_STATES); num_edges = parse_number_of_edges(edges_mtx, &regex_comment); graph = create_graph(num_nodes, num_edges, edge_joint_probability, dim_x, dim_y); add_nodes(graph, nodes_mtx, &regex_comment, num_node_states); add_edges(graph, edges_mtx, &regex_comment, num_node_states); regfree(&regex_comment); return graph; } size_t calculate_diameter_from_mtx(const char *edges_mtx, const char *nodes_mtx) { regex_t regex_comment; int reti; size_t num_nodes; // compile comment regex reti = regcomp(&regex_comment, "^[[:space:]]*%", 0); if (reti) { perror("Could not compile regex\n"); exit(1); } num_nodes = parse_number_of_nodes(nodes_mtx, &regex_comment); assert(num_nodes > 0); size_t * dist = (size_t *)malloc(sizeof(size_t) * num_nodes * num_nodes); assert(dist); for(size_t i = 0; i < num_nodes; ++i) { for(size_t j = 0; j < num_nodes; ++j) { if(i == j) { dist[i * num_nodes + j] = 0; } else { dist[i * num_nodes + j] = WEIGHT_INFINITY; } } } add_edge_to_diameter_calc(edges_mtx, dist, num_nodes, &regex_comment); for(size_t k = 0; k < num_nodes; ++k){ #pragma omp parallel for shared(dist) for(size_t i = 0; i < num_nodes; ++i){ for(size_t j = 0; j < num_nodes; ++j){ dist[i * num_nodes + j] = MIN(dist[i * num_nodes + k] + dist[k * num_nodes + j], dist[i * num_nodes + j]); } } } size_t diameter = dist[00]; for(size_t i = 0; i < num_nodes; ++i) { for (size_t j = 0; j < num_nodes; ++j) { if (dist[i * num_nodes + j] >= WEIGHT_INFINITY) { continue; } diameter = MAX(diameter, dist[i * num_nodes + j]); } } free(dist); regfree(&regex_comment); return diameter; }
constitute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE % % C O O NN N SS T I T U U T E % % C O O N N N ESSS T I T U U T EEE % % C O O N NN SS T I T U U T E % % CCCC OOO N N SSSSS T IIIII T UUU T EEEEE % % % % % % MagickCore Methods to Consitute an Image % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. The data can be % char, short int, int, float, or double. Float and double require the % pixels to be normalized [0..1], otherwise [0..QuantumRange]. 
For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
%     image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
%     Image *ConstituteImage(const size_t columns,const size_t rows,
%       const char *map,const StorageType storage,const void *pixels,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o columns: width in pixels of the image.
%
%   o rows: height in pixels of the image.
%
%   o map: This string reflects the expected ordering of the pixel array.
%     It can be any combination or order of R = red, G = green, B = blue,
%     A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%     Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%     P = pad.
%
%   o storage: Define the data type of the pixels.  Float and double types
%     are expected to be normalized [0..1] otherwise [0..QuantumRange].
%     Choose from these types: CharPixel, DoublePixel, FloatPixel,
%     IntegerPixel, LongPixel, QuantumPixel, or ShortPixel.
%
%   o pixels: This array of values contain the pixel components as defined
%     by map and type.  You must preallocate this array where the expected
%     length varies depending on the values of width, height, map, and type.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Record the pixel depth implied by the caller's storage type; types not
    listed keep the default depth.
  */
  switch (storage)
  {
    case CharPixel: image->depth=8*sizeof(unsigned char); break;
    case DoublePixel: image->depth=8*sizeof(double); break;
    case FloatPixel: image->depth=8*sizeof(float); break;
    case LongPixel: image->depth=8*sizeof(unsigned long); break;
    case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
    case ShortPixel: image->depth=8*sizeof(unsigned short); break;
    default: break;
  }
  /*
    Scan the channel map to infer the alpha trait and colorspace; a
    single-letter map (e.g. "I") implies grayscale.
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  /* Size the canvas, clear it, then import the caller-supplied pixels. */
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImage() returns all the properties of an image or image sequence
%  except for the pixels.  It is much faster and consumes far less memory
%  than ReadImage().  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the PingImage method is:
%
%      Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Ping the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* Stream handler used by PingImage(): discards the pixel data and simply
   reports the scanline width so the coder parses headers without storing
   pixels. */
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /* Clone the caller's info and flag "ping" so coders skip pixel decoding. */
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g   I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImages() pings one or more images and returns them as an image list.
%
%  The format of the PingImage method is:
%
%      Image *PingImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  /* Expand any %d-style scene template in the filename. */
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5]: ping each scene in the
        requested range and collect the results into one list.
      */
      read_info=CloneImageInfo(image_info);
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadImage() reads an image or image sequence from a file or file
handle. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the ReadImage method is: % % Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Read the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType IsCoderAuthorized(const char *coder, const PolicyRights rights,ExceptionInfo *exception) { if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",coder); return(MagickFalse); } return(MagickTrue); } MagickExport Image *ReadImage(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent], magick[MagickPathExtent], magick_filename[MagickPathExtent]; const char *value; const DelegateInfo *delegate_info; const MagickInfo *magick_info; DecodeImageHandler *decoder; ExceptionInfo *sans_exception; GeometryInfo geometry_info; Image *image, *next; ImageInfo *read_info; MagickBooleanType status; MagickStatusType flags; /* Determine image type from filename prefix or suffix (e.g. image.jpg). 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image_info->filename != (char *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent); (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) CopyMagickString(magick,read_info->magick,MagickPathExtent); /* Call appropriate image reader based on image type. */ sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(read_info->magick,sans_exception); if (sans_exception->severity == PolicyError) InheritException(exception,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) read_info->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } if ((magick_info != (const MagickInfo *) NULL) && (GetMagickDecoderSeekableStream(magick_info) != MagickFalse)) { image=AcquireImage(read_info,exception); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } if (IsBlobSeekable(image) == MagickFalse) { /* Coder requires a seekable stream. 
*/ *read_info->filename='\0'; status=ImageToFile(image,read_info->filename,exception); if (status == MagickFalse) { (void) CloseBlob(image); read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } read_info->temporary=MagickTrue; } (void) CloseBlob(image); image=DestroyImage(image); } image=NewImageList(); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(read_info->filename,filename, MagickPathExtent); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); } } if (decoder != (DecodeImageHandler *) NULL) { /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=decoder(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); if (read_info->temporary != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Let our decoding delegate process the image. 
*/ image=AcquireImage(read_info,exception); if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return((Image *) NULL); } (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); *read_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL, exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); image=DestroyImageList(image); read_info->temporary=MagickTrue; if (status != MagickFalse) (void) SetImageInfo(read_info,0,exception); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { if (IsPathAccessible(read_info->filename) != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); else ThrowFileException(exception,FileOpenError,"UnableToOpenFile", read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Call appropriate image reader based on image type. 
*/ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=(decoder)(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } if (read_info->temporary != MagickFalse) { (void) RelinquishUniqueFileResource(read_info->filename); read_info->temporary=MagickFalse; if (image != (Image *) NULL) (void) CopyMagickString(image->filename,filename,MagickPathExtent); } if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return(image); } if (exception->severity >= ErrorException) (void) LogMagickEvent(ExceptionEvent,GetMagickModule(), "Coder (%s) generated an image despite an error (%d), " "notify the developers",image->magick,exception->severity); if (IsBlobTemporary(image) != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) && (GetImageListLength(image) != 1)) { Image *clones; clones=CloneImages(image,read_info->scenes,exception); if (clones != (Image *) NULL) { image=DestroyImageList(image); image=GetFirstImageInList(clones); } } for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { char magick_path[MagickPathExtent], *property, timestamp[MagickTimeExtent]; const char *option; const StringInfo *profile; ssize_t option_type; static const char *source_date_epoch = (const char *) NULL; static MagickBooleanType epoch_initalized = MagickFalse; next->taint=MagickFalse; GetPathComponent(magick_filename,MagickPath,magick_path); if ((*magick_path == '\0') && (*next->magick == '\0')) (void) CopyMagickString(next->magick,magick,MagickPathExtent); (void) CopyMagickString(next->magick_filename,magick_filename, MagickPathExtent); if (IsBlobTemporary(image) != MagickFalse) (void) 
CopyMagickString(next->filename,filename,MagickPathExtent); if (next->magick_columns == 0) next->magick_columns=next->columns; if (next->magick_rows == 0) next->magick_rows=next->rows; (void) GetImageProperty(next,"exif:*",exception); (void) GetImageProperty(next,"icc:*",exception); (void) GetImageProperty(next,"iptc:*",exception); (void) GetImageProperty(next,"xmp:*",exception); option=GetImageOption(image_info,"exif:sync-image"); if (IsStringFalse(option) == MagickFalse) { value=GetImageProperty(next,"exif:Orientation",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:Orientation",exception); if (value != (char *) NULL) { next->orientation=(OrientationType) StringToLong(value); (void) DeleteImageProperty(next,"tiff:Orientation"); (void) DeleteImageProperty(next,"exif:Orientation"); } value=GetImageProperty(next,"exif:XResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.x; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.x=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:XResolution"); } value=GetImageProperty(next,"exif:YResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.y; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.y=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:YResolution"); } value=GetImageProperty(next,"exif:ResolutionUnit",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:ResolutionUnit",exception); if (value != (char *) NULL) { option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse, value); if (option_type >= 0) 
next->units=(ResolutionType) option_type; (void) DeleteImageProperty(next,"exif:ResolutionUnit"); (void) DeleteImageProperty(next,"tiff:ResolutionUnit"); } } if (next->page.width == 0) next->page.width=next->columns; if (next->page.height == 0) next->page.height=next->rows; option=GetImageOption(read_info,"caption"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"caption",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"comment"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"comment",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"label"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"label",property,exception); property=DestroyString(property); } if (LocaleCompare(next->magick,"TEXT") == 0) (void) ParseAbsoluteGeometry("0x0+0+0",&next->page); if ((read_info->extract != (char *) NULL) && (read_info->stream == (StreamHandler) NULL)) { RectangleInfo geometry; SetGeometry(next,&geometry); flags=ParseAbsoluteGeometry(read_info->extract,&geometry); if ((next->columns != geometry.width) || (next->rows != geometry.height)) { if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { Image *crop_image; crop_image=CropImage(next,&geometry,exception); if (crop_image != (Image *) NULL) ReplaceImageInList(&next,crop_image); } else if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0)) { Image *size_image; flags=ParseRegionGeometry(next,read_info->extract,&geometry, exception); size_image=ResizeImage(next,geometry.width,geometry.height, next->filter,exception); if (size_image != (Image *) NULL) ReplaceImageInList(&next,size_image); } } } profile=GetImageProfile(next,"icc"); if (profile == (const StringInfo *) NULL) 
profile=GetImageProfile(next,"icm"); profile=GetImageProfile(next,"iptc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"8bim"); if (epoch_initalized == MagickFalse) { source_date_epoch=getenv("SOURCE_DATE_EPOCH"); epoch_initalized=MagickTrue; } if (source_date_epoch == (const char *) NULL) { (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime, sizeof(timestamp),timestamp); (void) SetImageProperty(next,"date:modify",timestamp,exception); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime, sizeof(timestamp),timestamp); (void) SetImageProperty(next,"date:create",timestamp,exception); } option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (next->delay > (size_t) floor(geometry_info.rho+0.5)) next->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (next->delay < (size_t) floor(geometry_info.rho+0.5)) next->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } else next->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) next->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) { option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse, option); if (option_type >= 0) next->dispose=(DisposeType) option_type; } if (read_info->verbose != MagickFalse) (void) IdentifyImage(next,stderr,MagickFalse,exception); image=next; } read_info=DestroyImageInfo(read_info); if (GetBlobError(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImages() reads 
one or more images and returns them as an image list.
%
%  The format of the ReadImage method is:
%
%      Image *ReadImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Work on a private clone so the caller's image_info is left untouched.
  */
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      /*
        The filename expanded to something different, i.e. it contains a
        printf-style scene template.  Read each scene individually.
      */
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;  /* scene failed to read; exception records why */
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  /*
    Plain filename (no scene template): read it in a single call.
  */
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d I n l i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  /*
    Decode the Base64 payload that follows the comma.
  */
  blob=Base64Decode(++p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  /*
    Derive a synthetic filename "data.<subtype>" from the media type so the
    blob reader can select a decoder; the copy below is bounded by
    MagickPathExtent-6 ("data." plus terminator).
  */
  for (p=content; (*p != '/') && (*p != '\0'); p++) ;
  if (*p != '\0')
    {
      char
        *q;

      ssize_t
        i;

      /*
        Extract media type.
      */
      if (LocaleNCompare(++p,"x-",2) == 0)
        p+=2;
      (void) strcpy(read_info->filename,"data.");
      q=read_info->filename+5;
      for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
        *q++=(*p++);
      *q++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file handle.
%  If writing to a file is on disk, the name is defined by the filename member
%  of the image structure.  WriteImage() returns MagickFalse is there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
%
%  The format of the WriteImage method is:
%
%      MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /*
    Remember the caller's filename so it can be restored on the delegate
    paths below.
  */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(write_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* detect host byte order by inspecting the first byte of 1UL */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename,image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.  Write to a
                unique temporary file instead; it is copied to the real
                destination at the end of this function.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.
      */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder and no delegate for the requested format; fall back
            to the image's own format, then the filename extension, then
            image->magick, before reporting a missing delegate.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          if (sans_exception->severity == PolicyError)
            magick_info=GetMagickInfo(write_info->magick,exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowWriterException(FileOpenError,"UnableToWriteFile");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImages() writes an image sequence into one or more files.  While
%  WriteImage() can write an image sequence, it is limited to writing
%  the sequence into a single file using a format which supports multiple
%  frames.  WriteImages(), however, does not have this limitation, instead it
%  generates multiple output files if necessary (or when requested).
When
%  ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
%  to include a printf-style formatting string for the frame number (e.g.
%  "image%02d.png").
%
%  The format of the WriteImages method is:
%
%      MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o images: the image list.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag  "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /*
    An explicit filename argument overrides the filename stored in every
    frame of the list.
  */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,
      MagickPathExtent);
  /*
    If scene numbers are not strictly increasing, renumber the whole list
    consecutively starting from the first frame's scene.
  */
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /* suppress the per-frame monitor; overall progress is reported below */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    if (write_info->adjoin != MagickFalse)
      break;  /* the whole sequence went into a single file */
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
ep.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - EP This benchmark is an OpenMP C version of the NPB EP code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: P. O. Frederickson D. H. Bailey A. C. Woo OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" #include "npbparams.h" /* parameters */ #define MK 16 #define MM (M - MK) #define NN (1 << MM) #define NK (1 << MK) #define NQ 10 #define EPSILON 1.0e-8 #define A 1220703125.0 #define S 271828183.0 #define TIMERS_ENABLED FALSE /* global variables */ /* common /storage/ */ static double x[2*NK]; #pragma omp threadprivate(x) static double q[NQ]; /*-------------------------------------------------------------------- program EMBAR c-------------------------------------------------------------------*/ /* c This is the serial version of the APP Benchmark 1, c the "embarassingly parallel" benchmark. c c M is the Log_2 of the number of complex pairs of uniform (0, 1) random c numbers. MK is the Log_2 of the size of each batch of uniform random c numbers. MK can be set for convenience on a given system, since it does c not affect the results. 
*/
/*
 * EP benchmark driver: generates pairs of uniform pseudorandom numbers,
 * converts them to Gaussian deviates by acceptance-rejection, and tallies
 * counts in concentric square annuli q[0..NQ-1].  Sums sx/sy and the
 * counts are checked against reference values for the known classes.
 * NOTE(review): main() is declared int but has no return statement.
 */
int main(int argc, char **argv)
{
    double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc;
    double dum[3] = { 1.0, 1.0, 1.0 };
    int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode,
        no_large_nodes, np_add, k_offset, j;
    int nthreads = 1;
    boolean verified;
    char size[13+1];        /* character*13 */

/*
c   Because the size of the problem is too large to store in a 32-bit
c   integer for some classes, we put it into a string (for printing).
c   Have to strip off the decimal point put in there by the floating
c   point print statement (internal file)
*/
    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
           " - EP Benchmark\n");
    sprintf(size, "%12.0f", pow(2.0, M+1));
    for (j = 13; j >= 1; j--) {
        if (size[j] == '.') size[j] = ' ';
    }
    printf(" Number of random numbers generated: %13s\n", size);

    verified = FALSE;

/*
c   Compute the number of "batches" of random number pairs generated
c   per processor. Adjust if the number of processors does not evenly
c   divide the total number
*/
    np = NN;

/*
c   Call the random number generator functions and initialize
c   the x-array to reduce the effects of paging on the timings.
c   Also, call all mathematical functions that are used. Make
c   sure these initializations cannot be eliminated as dead code.
*/
    vranlc(0, &(dum[0]), dum[1], &(dum[2]));
    dum[0] = randlc(&(dum[1]), dum[2]);
    for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;
    Mops = log(sqrt(fabs(max(1.0, 1.0))));

    timer_clear(1);
    timer_clear(2);
    timer_clear(3);
    timer_start(1);

    vranlc(0, &t1, A, x);

/*   Compute AN = A ^ (2 * NK) (mod 2^46). */

    t1 = A;

    for ( i = 1; i <= MK+1; i++) {
        t2 = randlc(&t1, t1);
    }

    an = t1;
    tt = S;
    gc = 0.0;
    sx = 0.0;
    sy = 0.0;

    for ( i = 0; i <= NQ - 1; i++) {
        q[i] = 0.0;
    }

/*
c   Each instance of this loop may be performed independently. We compute
c   the k offsets separately to take into account the fact that some nodes
c   have more numbers to generate than others
*/
    k_offset = -1;

#pragma omp parallel copyin(x)
{
    double t1, t2, t3, t4, x1, x2;
    int kk, i, ik, l;
    double qq[NQ];          /* private copy of q[0:NQ-1] */

    for (i = 0; i < NQ; i++) qq[i] = 0.0;

#pragma omp for reduction(+:sx,sy) schedule(static)
    for (k = 1; k <= np; k++) {
        kk = k_offset + k;
        t1 = S;
        t2 = an;

/*      Find starting seed t1 for this kk. */

        for (i = 1; i <= 100; i++) {
            ik = kk / 2;
            if (2 * ik != kk) t3 = randlc(&t1, t2);
            if (ik == 0) break;
            t3 = randlc(&t2, t2);
            kk = ik;
        }

/*      Compute uniform pseudorandom numbers. */
        /* NOTE(review): x-1 relies on vranlc's 1-based indexing of the
           output array; the pointer itself is one before x — confirm the
           vranlc implementation before touching this. */
        if (TIMERS_ENABLED == TRUE) timer_start(3);
        vranlc(2*NK, &t1, A, x-1);
        if (TIMERS_ENABLED == TRUE) timer_stop(3);

/*
c       Compute Gaussian deviates by acceptance-rejection method and
c       tally counts in concentric square annuli.  This loop is not
c       vectorizable.
*/
        if (TIMERS_ENABLED == TRUE) timer_start(2);

        for ( i = 0; i < NK; i++) {
            x1 = 2.0 * x[2*i] - 1.0;
            x2 = 2.0 * x[2*i+1] - 1.0;
            t1 = pow2(x1) + pow2(x2);
            if (t1 <= 1.0) {
                t2 = sqrt(-2.0 * log(t1) / t1);
                t3 = (x1 * t2);                         /* Xi */
                t4 = (x2 * t2);                         /* Yi */
                l = max(fabs(t3), fabs(t4));
                qq[l] += 1.0;                           /* counts */
                sx = sx + t3;                           /* sum of Xi */
                sy = sy + t4;                           /* sum of Yi */
            }
        }
        if (TIMERS_ENABLED == TRUE) timer_stop(2);
    }
    /* merge the per-thread annulus tallies into the shared q[] */
#pragma omp critical
    {
      for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
    }
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end of parallel region */

    for (i = 0; i <= NQ-1; i++) {
        gc = gc + q[i];
    }

    timer_stop(1);
    tm = timer_read(1);

    /* Verify against the reference sums for the problem classes. */
    nit = 0;
    if (M == 24) {
        if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
           (fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 25) {
        if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
            (fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 28) {
      {
        if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
            (fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
        printf("Debug: 231, sx is:%f, sy is:%f\n",sx,sy);
      }
    } else if (M == 30) {
        if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
            (fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 32) {
        if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
            (fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    }

    Mops = pow(2.0, M+1)/tm/1000000.0;

    printf("EP Benchmark Results: \n"
           "CPU Time = %10.4f\n"
           "N = 2^%5d\n"
           "No. Gaussian Pairs = %15.0f\n"
           "Sums = %25.15e %25.15e\n"
           "Counts:\n",
           tm, M, gc, sx, sy);
    for (i = 0; i <= NQ-1; i++) {
        printf("%3d %15.0f\n", i, q[i]);
    }

    c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
                    tm, Mops,
                    "Random numbers generated",
                    verified, NPBVERSION, COMPILETIME,
                    CS1, CS2, CS3, CS4, CS5, CS6, CS7);

    if (TIMERS_ENABLED == TRUE) {
        printf("Total time: %f", timer_read(1));
        printf("Gaussian pairs: %f", timer_read(2));
        printf("Random numbers: %f", timer_read(3));
    }
}
structure_factors_direct.h
#ifndef CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
#define CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H

#include <cctbx/xray/scattering_type_registry.h>
#include <cctbx/xray/hr_ht_cache.h>
#include <cctbx/math/cos_sin_table.h>
#include <omptbx/omp_or_stubs.h>

#define CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_NO_PRAGMA_OMP

namespace cctbx { namespace xray { namespace structure_factors {

  /* Accumulates, for a single Miller index h, the direct-summation
     structure factor contributions of individual scatterers, summing
     over the symmetry-equivalent (hR, hT) pairs cached in hr_ht. */
  template <typename CosSinType, typename ScattererType>
  struct direct_sum_over_equivalent_h
  {
    typedef typename ScattererType::float_type float_type;
    typedef std::complex<float_type> complex_type;

    direct_sum_over_equivalent_h(
      CosSinType const& cos_sin_,
      sgtbx::space_group const& space_group_,
      miller::index<> h,
      float_type d_star_sq_)
    :
      cos_sin(cos_sin_),
      hr_ht(cos_sin_, space_group_, h),
      d_star_sq(d_star_sq_),
      sum_f_calc(0,0)
    {}

    /* Adds one scatterer's contribution to sum_f_calc.
       f0 is the scattering-type form factor already evaluated at this
       reflection's d_star_sq. */
    void add_contribution_of(ScattererType const& scatterer, float_type f0)
    {
      typedef float_type f_t;
      typedef complex_type c_t;
      c_t f_calc(0,0);
      /* Sum exp(2*pi*i*(hR*x + hT)) over the cached symmetry copies,
         applying the anisotropic (and optional anharmonic) ADP factors
         per symmetry copy. */
      for(std::size_t i=0;i<hr_ht.groups.size();i++) {
        hr_ht_group<f_t> const& g = hr_ht.groups[i];
        f_t hrx = g.hr * scatterer.site;
        c_t term = cos_sin.get(hrx + g.ht);
        if (scatterer.flags.use_u_aniso()) {
          f_t dw = adptbx::debye_waller_factor_u_star(g.hr, scatterer.u_star);
          term *= dw;
          if (scatterer.anharmonic_adp) {
            term *= scatterer.anharmonic_adp->calculate(g.hr);
          }
        }
        f_calc += term;
      }
      /* Exploit centric symmetry recorded in hr_ht. */
      if (hr_ht.is_origin_centric) {
        f_calc = c_t(2*f_calc.real(),0);
      }
      else if (hr_ht.is_centric) {
        f_calc += std::conj(f_calc) * hr_ht.f_h_inv_t;
      }
      /* Isotropic Debye-Waller factor applies once per scatterer. */
      if (scatterer.flags.use_u_iso() && scatterer.u_iso != 0) {
        f_t dw=adptbx::debye_waller_factor_u_iso(d_star_sq/4, scatterer.u_iso);
        f_calc *= dw;
      }
      /* Weighted (f0 + f') and f'' anomalous scattering terms. */
      f_t w = scatterer.weight();
      f_t f0p_w = (f0 + scatterer.fp) * w;
      f_t fdp_w = scatterer.fdp;
      if (fdp_w != 0) {
        fdp_w *= w;
        f_calc *= c_t(f0p_w, fdp_w);
      }
      else {
        f_calc *= f0p_w;
      }
      sum_f_calc += f_calc;
    }

    /* Final structure factor, scaled by the lattice-translation factor. */
    complex_type f_calc() { return sum_f_calc * hr_ht.ltr_factor; }

    CosSinType const& cos_sin;          // cos/sin evaluator (table or exact)
    hr_ht_cache<float_type> hr_ht;      // cached symmetry data for this h
    float_type d_star_sq;               // |h*|^2 for this reflection
    complex_type sum_f_calc;            // running sum over scatterers
  };
  /* Direct-summation structure factor calculation for a list of Miller
     indices: f_calc()[i] is the structure factor of miller_indices[i]
     summed over all scatterers.  The loop over reflections may run under
     OpenMP (currently disabled via the *_NO_PRAGMA_OMP macro above). */
  template <class ScattererType=scatterer<> >
  class direct
  {
    public:
      typedef ScattererType scatterer_type;
      typedef typename ScattererType::float_type float_type;

      direct() {}

      /* Computes with exact cos/sin evaluation. */
      direct(
        uctbx::unit_cell const& unit_cell,
        sgtbx::space_group const& space_group,
        af::const_ref<miller::index<> > const& miller_indices,
        af::const_ref<ScattererType> const& scatterers,
        xray::scattering_type_registry const& scattering_type_registry)
      {
        math::cos_sin_exact<float_type> cos_sin;
        compute(cos_sin, unit_cell, space_group, miller_indices, scatterers,
                scattering_type_registry);
      }

      /* Computes with a caller-supplied cos/sin evaluator (e.g. a table). */
      template<class CosSinType>
      direct(
        CosSinType const& cos_sin,
        uctbx::unit_cell const& unit_cell,
        sgtbx::space_group const& space_group,
        af::const_ref<miller::index<> > const& miller_indices,
        af::const_ref<ScattererType> const& scatterers,
        xray::scattering_type_registry const& scattering_type_registry)
      {
        compute(cos_sin, unit_cell, space_group, miller_indices, scatterers,
                scattering_type_registry);
      }

      /* One complex structure factor per input Miller index. */
      af::shared<std::complex<float_type> > const&
      f_calc() const { return f_calc_; }

    private:
      af::shared<std::complex<float_type> > f_calc_;

      template <typename CosSinType>
      void compute(
        CosSinType const& cos_sin,
        uctbx::unit_cell const& unit_cell,
        sgtbx::space_group const& space_group,
        af::const_ref<miller::index<> > const& miller_indices,
        af::const_ref<ScattererType> const& scatterers,
        xray::scattering_type_registry const& scattering_type_registry)
      {
        typedef float_type f_t;
        typedef std::complex<float_type> c_t;
        int n = static_cast<int>(miller_indices.size());
        f_calc_ = af::shared<c_t>(n, af::init_functor_null<c_t>());
        c_t *f_calc_beg = f_calc_.begin();
        af::shared<std::size_t>
          scattering_type_indices = scattering_type_registry.unique_indices(
            scatterers);
        /* The OpenMP standard specifies that a throw executed inside a
           parallel region must cause execution to resume within the same
           parallel region, and it must be caught by the same thread
           that threw the exception. Since a std::runtime_error may be thrown
           during Debye-Waller computations (c.f. adptbx.h, function
           debye_waller_factor_exp) one must make sure it cannot escape the
           body of the parallelised loop. So we catch it inside the loop and
           then re-throw it immediately after the loop finished.
         */
        boost::optional<std::runtime_error> error;
#if !defined(CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_NO_PRAGMA_OMP)
#if !defined(__DECCXX_VER) || (defined(_OPENMP) && _OPENMP > 199819)
        #pragma omp parallel for schedule(static)
#endif
#endif
        for(int i=0;i<n;i++) {
          try {
            miller::index<> h = miller_indices[i];
            f_t d_star_sq = unit_cell.d_star_sq(h);
            /* Form factors for all unique scattering types at this
               resolution; looked up per scatterer via its type index. */
            af::shared<double>
              form_factors = scattering_type_registry
                .unique_form_factors_at_d_star_sq(d_star_sq);
            direct_sum_over_equivalent_h<CosSinType, ScattererType>
              sum(cos_sin, space_group, h, d_star_sq);
            for(std::size_t j=0; j<scatterers.size(); ++j) {
              sum.add_contribution_of(
                scatterers[j], form_factors[scattering_type_indices[j]]);
            }
            f_calc_beg[i] = sum.f_calc();
          }
          catch (std::runtime_error const& e) {
            #pragma omp critical
            {
              // The first error will be recorded only.
              if (!error) error = e;
            }
          }
        }
        if (error) throw *error;
      }
  };

}}} // namespace cctbx::xray::structure_factors

#endif // CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
libperf.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED. * Copyright (C) ARM Ltd. 2017-2020. ALL RIGHTS RESERVED. * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <ucs/debug/log.h> #include <ucs/arch/bitops.h> #include <ucs/sys/module.h> #include <ucs/sys/string.h> #include <string.h> #include <tools/perf/lib/libperf_int.h> #include <unistd.h> #if _OPENMP #include <omp.h> #endif /* _OPENMP */ #define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \ _status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \ if (_status != UCS_OK) { \ ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \ "message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \ (_msg)[_op], (_size)); \ return _status; \ } #define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \ if (!ucs_test_all_flags(_attr, _required)) { \ if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \ ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \ #_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \ (_msg)[ucs_ffs64(~(_attr) & (_required))]); \ } \ return UCS_ERR_UNSUPPORTED; \ } typedef struct { union { struct { size_t dev_addr_len; size_t iface_addr_len; size_t ep_addr_len; } uct; struct { size_t worker_addr_len; size_t total_wireup_len; } ucp; }; size_t rkey_size; unsigned long recv_buffer; } ucx_perf_ep_info_t; const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST]; static const char *perf_iface_ops[] = { [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short", [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy", [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy", [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short", [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy", 
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short", [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy", [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep", [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability", [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback", [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback", [ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy", [ucs_ilog2(UCT_IFACE_FLAG_EP_CHECK)] = "ep check", [ucs_ilog2(UCT_IFACE_FLAG_EP_KEEPALIVE)] = "ep keepalive" }; static const char *perf_atomic_op[] = { [UCT_ATOMIC_OP_ADD] = "add", [UCT_ATOMIC_OP_AND] = "and", [UCT_ATOMIC_OP_OR] = "or" , [UCT_ATOMIC_OP_XOR] = "xor" }; static const char *perf_atomic_fop[] = { [UCT_ATOMIC_OP_ADD] = "fetch-add", [UCT_ATOMIC_OP_AND] = "fetch-and", [UCT_ATOMIC_OP_OR] = "fetch-or", [UCT_ATOMIC_OP_XOR] = "fetch-xor", [UCT_ATOMIC_OP_SWAP] = "swap", [UCT_ATOMIC_OP_CSWAP] = "cswap" }; /* * This Quickselect routine is based on the algorithm described in * "Numerical recipes in C", Second Edition, * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5 * This code by Nicolas Devillard - 1998. Public domain. 
*/ static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n) { int low, high ; int median; int middle, ll, hh; #define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; } low = 0 ; high = n-1 ; median = (low + high) / 2; for (;;) { if (high <= low) /* One element only */ return arr[median] ; if (high == low + 1) { /* Two elements only */ if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; return arr[median] ; } /* Find median of low, middle and high items; swap into position low */ middle = (low + high) / 2; if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ; if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ; /* Swap low item (now in position middle) into position (low+1) */ ELEM_SWAP(arr[middle], arr[low+1]) ; /* Nibble from each end towards middle, swapping items when stuck */ ll = low + 1; hh = high; for (;;) { do ll++; while (arr[low] > arr[ll]) ; do hh--; while (arr[hh] > arr[low]) ; if (hh < ll) break; ELEM_SWAP(arr[ll], arr[hh]) ; } /* Swap middle item (in position low) back into correct position */ ELEM_SWAP(arr[low], arr[hh]) ; /* Re-set active partition */ if (hh <= median) low = ll; if (hh >= median) high = hh - 1; } } static ucs_status_t uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length, unsigned flags, uct_allocated_memory_t *alloc_mem) { ucs_status_t status; status = uct_iface_mem_alloc(perf->uct.iface, length, flags, "perftest", alloc_mem); if (status != UCS_OK) { ucs_error("failed to allocate memory: %s", ucs_status_string(status)); return status; } ucs_assert(alloc_mem->md == perf->uct.md); return UCS_OK; } static void uct_perf_test_free_host(const ucx_perf_context_t *perf, uct_allocated_memory_t *alloc_mem) { uct_iface_mem_free(alloc_mem); } static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type, const void *src, ucs_memory_type_t src_mem_type, size_t count) { if ((dst_mem_type != 
UCS_MEMORY_TYPE_HOST) || (src_mem_type != UCS_MEMORY_TYPE_HOST)) {
        ucs_error("wrong memory type passed src - %d, dst - %d",
                  src_mem_type, dst_mem_type);
    } else {
        memcpy(dst, src, count);
    }
}

/* Allocate the UCT test buffers: one send buffer and one receive buffer
 * (each buffer_size * thread_count bytes, via the memory-type allocator),
 * plus the IOV descriptor array used by the ZCOPY layout.
 * Cleanup on failure follows the goto chain in reverse allocation order. */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    /* With ZCOPY + a configured IOV stride, the buffer must hold one stride
     * per IOV entry; otherwise the plain total message size suffices. */
    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) &&
        params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */

    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;
    flags |= UCT_MD_MEM_ACCESS_ALL;

    /* Allocate send buffer memory */
    status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
                                        flags, &perf->uct.send_mem);
    if (status != UCS_OK) {
        goto err;
    }
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
                                        flags, &perf->uct.recv_mem);
    if (status != UCS_OK) {
        goto err_free_send;
    }
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory (one IOV list per thread) */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
                           perf->params.msg_size_cnt *
                           params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        goto err_free_recv;
    }

    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    perf->allocator->uct_free(perf, &perf->uct.recv_mem);
err_free_send:
    perf->allocator->uct_free(perf, &perf->uct.send_mem);
err:
    return status;
}

/* Release everything obtained by uct_perf_test_alloc_mem(). */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    perf->allocator->uct_free(perf, &perf->uct.send_mem);
    perf->allocator->uct_free(perf, &perf->uct.recv_mem);
    free(perf->uct.iov);
}

/* (Re)start the measurement clocks: record the accurate start time, derive
 * the absolute end time from params.max_time (0.0 means "no time limit"),
 * and seed the previous/current sample timestamps. */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t start_time = ucs_get_time();

    perf->start_time_acc = ucs_get_accurate_time();
    perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX :
                     ucs_time_from_sec(perf->params.max_time) + start_time;
    perf->prev_time = start_time;
    perf->prev.time = start_time;
    perf->prev.time_acc = perf->start_time_acc;
    perf->current.time_acc = perf->start_time_acc;
}

/* Initialize/reset all parameters that could be modified by the warm-up run */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
                                          const ucx_perf_params_t *params)
{
    unsigned i;

    /* max_iter == 0 means "unlimited iterations" */
    perf->max_iter = (perf->params.max_iter == 0) ?
UINT64_MAX : perf->params.max_iter;
    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);
    perf->current.time = 0;
    perf->current.msgs = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;
    perf->prev.msgs = 0;
    perf->prev.bytes = 0;
    perf->prev.iters = 0;
    perf->timing_queue_head = 0;
    for (i = 0; i < TIMING_QUEUE_SIZE; ++i) {
        perf->timing_queue[i] = 0;
    }
    ucx_perf_test_start_clock(perf);
}

/* Initialize a perf context from user parameters. The allocator is picked by
 * role: group index 0 (the sender side) uses the send memory type, every
 * other rank uses the receive memory type. */
static void ucx_perf_test_init(ucx_perf_context_t *perf,
                               const ucx_perf_params_t *params)
{
    unsigned group_index;

    perf->params = *params;
    group_index = rte_call(perf, group_index);

    if (0 == group_index) {
        perf->allocator = ucx_perf_mem_type_allocators[params->send_mem_type];
    } else {
        perf->allocator = ucx_perf_mem_type_allocators[params->recv_mem_type];
    }

    ucx_perf_test_prepare_new_run(perf, params);
}

/* Fill 'result' with latency / bandwidth / message-rate figures derived from
 * the counters accumulated in 'perf'. For ping-pong test types each measured
 * round-trip covers two messages, so latencies are halved and rates doubled
 * via 'factor'. */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    ucs_time_t median;
    double factor;

    if ((perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) ||
        (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG_WAIT_MEM)) {
        factor = 2.0;
    } else {
        factor = 1.0;
    }

    result->iters = perf->current.iters;
    result->bytes = perf->current.bytes;
    result->elapsed_time = perf->current.time_acc - perf->start_time_acc;

    /* Latency */
    /* "typical" latency is the median of the per-iteration timing queue */
    median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE);
    result->latency.typical = ucs_time_to_sec(median) / factor;

    result->latency.moment_average =
        (perf->current.time_acc - perf->prev.time_acc)
        / (perf->current.iters - perf->prev.iters)
        / factor;

    result->latency.total_average =
        (perf->current.time_acc - perf->start_time_acc)
        / perf->current.iters
        / factor;

    /* Bandwidth */
    result->bandwidth.typical = 0.0; // Undefined

    result->bandwidth.moment_average =
        (perf->current.bytes - perf->prev.bytes)
        / (perf->current.time_acc - perf->prev.time_acc)
        * factor;

    result->bandwidth.total_average =
        perf->current.bytes
        / (perf->current.time_acc - perf->start_time_acc)
        * factor;

    /* Packet rate */
    result->msgrate.typical = 0.0; // Undefined

    result->msgrate.moment_average =
        (perf->current.msgs - perf->prev.msgs)
        / (perf->current.time_acc - perf->prev.time_acc)
        * factor;

    result->msgrate.total_average =
        perf->current.msgs
        / (perf->current.time_acc - perf->start_time_acc)
        * factor;
}

/* Validate user-supplied test parameters that are common to all APIs.
 * Errors are logged only when the VERBOSE flag is set, but the invalid-param
 * status is returned either way. */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    size_t it;

    /* check if zero-size messages are requested and supported */
    if ((/* they are not supported by: */
         /* - UCT tests, except UCT AM Short/Bcopy */
         (params->api == UCX_PERF_API_UCT) ||
         (/* - UCP RMA and AMO tests */
          (params->api == UCX_PERF_API_UCP) &&
          (params->command != UCX_PERF_CMD_AM) &&
          (params->command != UCX_PERF_CMD_TAG) &&
          (params->command != UCX_PERF_CMD_TAG_SYNC) &&
          (params->command != UCX_PERF_CMD_STREAM))) &&
        ucx_perf_get_message_size(params) < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if ((params->api == UCX_PERF_API_UCP) &&
        ((params->send_mem_type != UCS_MEMORY_TYPE_HOST) ||
         (params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) &&
        ((params->command == UCX_PERF_CMD_PUT) ||
         (params->command == UCX_PERF_CMD_GET) ||
         (params->command == UCX_PERF_CMD_ADD) ||
         (params->command == UCX_PERF_CMD_FADD) ||
         (params->command == UCX_PERF_CMD_SWAP) ||
         (params->command == UCX_PERF_CMD_CSWAP))) {
        /* TODO: remove when support for non-HOST memory types will be added */
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types",
                      ucs_memory_type_names[params->send_mem_type],
                      ucs_memory_type_names[params->recv_mem_type]);
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if (params->max_outstanding < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("max_outstanding, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* check if particular message size fit into stride size */
    if (params->iov_stride) {
        for (it = 0; it < params->msg_size_cnt; ++it) {
            if
(params->msg_size_list[it] > params->iov_stride) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("Buffer size %lu bigger than stride %lu",
                              params->msg_size_list[it], params->iov_stride);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }

    return UCS_OK;
}

/* Blocking flush of a single UCT endpoint: keeps retrying uct_ep_flush()
 * while the transport reports no resources, and progresses the worker until
 * the flush completion fires. comp.count starts at 2 so the loop exits when
 * either the inline-OK path decrements it or the async completion callback
 * brings it down to 1. Unexpected errors are logged and abort the wait. */
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index)
{
    uct_ep_h ep = perf->uct.peers[peer_index].ep;
    uct_completion_t comp;
    ucs_status_t status;
    int started;

    started = 0;
    comp.func = NULL;
    comp.count = 2;

    do {
        if (!started) {
            status = uct_ep_flush(ep, 0, &comp);
            if (status == UCS_OK) {
                /* completed inline - no async completion will arrive */
                --comp.count;
            } else if (status == UCS_INPROGRESS) {
                /* flush issued; stop re-issuing and just wait */
                started = 1;
            } else if (status != UCS_ERR_NO_RESOURCE) {
                ucs_error("uct_ep_flush() failed: %s",
                          ucs_status_string(status));
                return;
            }
            /* UCS_ERR_NO_RESOURCE: progress and retry on next iteration */
        }
        uct_worker_progress(perf->uct.worker);
    } while (comp.count > 1);
}

/* Blocking flush of the whole UCT interface; progresses the worker until
 * uct_iface_flush() stops returning UCS_INPROGRESS. */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    ucs_status_t status;

    do {
        status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
    } while (status == UCS_INPROGRESS);

    if (status != UCS_OK) {
        ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status));
    }
}

/* Map a data layout to the corresponding capability flag
 * (short / bcopy / zcopy); unknown layouts map to 0. */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    return ((layout == UCT_PERF_DATA_LAYOUT_SHORT) ||
            (layout == UCT_PERF_DATA_LAYOUT_SHORT_IOV)) ? short_f :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f :
           0;
}

/* Set the requested atomic-op bit in the 32-bit or 64-bit op mask according
 * to the operand size; any other size is unsupported. */
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
                                             uint64_t *op64, uint64_t op)
{
    if (size == sizeof(uint32_t)) {
        *op32 = UCS_BIT(op);
        return UCS_OK;
    } else if (size == sizeof(uint64_t)) {
        *op64 = UCS_BIT(op);
        return UCS_OK;
    }
    return UCS_ERR_UNSUPPORTED;
}

/* Map a data layout to the matching size limit
 * (short / bcopy / zcopy); unknown layouts map to 0. */
static inline size_t __get_max_size(uct_perf_data_layout_t layout,
                                    size_t short_m, size_t bcopy_m,
                                    uint64_t zcopy_m)
{
    return ((layout == UCT_PERF_DATA_LAYOUT_SHORT) ||
            (layout == UCT_PERF_DATA_LAYOUT_SHORT_IOV)) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ?
bcopy_m : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m : 0; } static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params, ucs_memory_type_t mem_type, uct_md_attr_t *md_attr) { if (!(md_attr->cap.access_mem_types & UCS_BIT(mem_type)) && !(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT, ucs_memory_type_names[mem_type], UCT_PERF_TEST_PARAMS_ARG(params)); return UCS_ERR_INVALID_PARAM; } } return UCS_OK; } static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params, uct_iface_h iface, uct_md_h md) { uint64_t required_flags = 0; uint64_t atomic_op32 = 0; uint64_t atomic_op64 = 0; uint64_t atomic_fop32 = 0; uint64_t atomic_fop64 = 0; uct_md_attr_t md_attr; uct_iface_attr_t attr; ucs_status_t status; size_t min_size, max_size, max_iov, message_size; status = uct_md_query(md, &md_attr); if (status != UCS_OK) { ucs_error("uct_md_query(%s) failed: %s", params->uct.md_name, ucs_status_string(status)); return status; } status = uct_iface_query(iface, &attr); if (status != UCS_OK) { ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s", UCT_PERF_TEST_PARAMS_ARG(params), ucs_status_string(status)); return status; } min_size = 0; max_iov = 1; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_AM: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT, UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY); required_flags |= UCT_IFACE_FLAG_CB_SYNC; min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.am.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short, attr.cap.am.max_bcopy, attr.cap.am.max_zcopy); max_iov = attr.cap.am.max_iov; break; case UCX_PERF_CMD_PUT: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT, UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY); min_size = 
__get_max_size(params->uct.data_layout, 0, 0, attr.cap.put.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short, attr.cap.put.max_bcopy, attr.cap.put.max_zcopy); max_iov = attr.cap.put.max_iov; break; case UCX_PERF_CMD_GET: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT, UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY); min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.get.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short, attr.cap.get.max_bcopy, attr.cap.get.max_zcopy); max_iov = attr.cap.get.max_iov; break; case UCX_PERF_CMD_ADD: ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD, perf_atomic_op, params, status); max_size = 8; break; case UCX_PERF_CMD_FADD: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_SWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_CSWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP, perf_atomic_fop, params, status); max_size = 8; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } /* check atomics first */ ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op); ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op); ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop); ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop); /* check iface flags */ if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) && (!ucs_test_all_flags(attr.cap.flags, 
required_flags) || !required_flags)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s", UCT_PERF_TEST_PARAMS_ARG(params), perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]); } return UCS_ERR_UNSUPPORTED; } if (message_size < min_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is smaller than min supported (%zu)", message_size, min_size); } return UCS_ERR_UNSUPPORTED; } if (message_size > max_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is larger than max supported (%zu)", message_size, max_size); } return UCS_ERR_UNSUPPORTED; } if (params->command == UCX_PERF_CMD_AM) { if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) && (params->uct.am_hdr_size != sizeof(uint64_t))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Short AM header size must be 8 bytes"); } return UCS_ERR_INVALID_PARAM; } if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) && (params->uct.am_hdr_size > attr.cap.am.max_hdr)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than max supported " "(%zu)", params->uct.am_hdr_size, attr.cap.am.max_hdr); } return UCS_ERR_UNSUPPORTED; } if (params->uct.am_hdr_size > message_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than message size " "(%zu)", params->uct.am_hdr_size, message_size); } return UCS_ERR_INVALID_PARAM; } if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM flow-control window (%d) too large (should be <= %d)", params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW); } return UCS_ERR_INVALID_PARAM; } if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) && (params->flags & UCX_PERF_TEST_FLAG_VERBOSE)) { ucs_warn("Running active-message test with on-sided progress"); } } if 
((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) || (UCT_PERF_DATA_LAYOUT_SHORT_IOV == params->uct.data_layout)) { if (params->msg_size_cnt > max_iov) { if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) || !params->msg_size_cnt) { ucs_error("Wrong number of IOV entries. Requested is %lu, " "should be in the range 1...%lu", params->msg_size_cnt, max_iov); } return UCS_ERR_UNSUPPORTED; } /* if msg_size_cnt == 1 the message size checked above */ if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && (UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) { if (params->uct.am_hdr_size > params->msg_size_list[0]) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%lu) larger than the first IOV " "message size (%lu)", params->uct.am_hdr_size, params->msg_size_list[0]); } return UCS_ERR_INVALID_PARAM; } } } status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr); if (status != UCS_OK) { return status; } status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf) { const size_t buffer_size = ADDR_BUF_SIZE; ucx_perf_ep_info_t info, *remote_info; unsigned group_size, i, group_index; uct_device_addr_t *dev_addr; uct_iface_addr_t *iface_addr; uct_ep_addr_t *ep_addr; uct_iface_attr_t iface_attr; uct_md_attr_t md_attr; uct_ep_params_t ep_params; void *rkey_buffer; ucs_status_t status; struct iovec vec[5]; void *buffer; void *req; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("Failed to allocate RTE buffer"); status = UCS_ERR_NO_MEMORY; goto err; } status = uct_iface_query(perf->uct.iface, &iface_attr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status)); goto err_free; } status = uct_md_query(perf->uct.md, &md_attr); if (status != UCS_OK) { ucs_error("Failed to uct_md_query: %s", 
ucs_status_string(status)); goto err_free; } if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) { info.rkey_size = md_attr.rkey_packed_size; } else { info.rkey_size = 0; } info.uct.dev_addr_len = iface_attr.device_addr_len; info.uct.iface_addr_len = iface_attr.iface_addr_len; info.uct.ep_addr_len = iface_attr.ep_addr_len; info.recv_buffer = (uintptr_t)perf->recv_buffer; rkey_buffer = buffer; dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size); iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len); ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len); ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <= UCS_PTR_BYTE_OFFSET(buffer, buffer_size)); status = uct_iface_get_device_address(perf->uct.iface, dev_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_device_address: %s", ucs_status_string(status)); goto err_free; } status = uct_iface_get_address(perf->uct.iface, iface_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status)); goto err_free; } if (info.rkey_size > 0) { memset(rkey_buffer, 0, info.rkey_size); status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status)); goto err_free; } } group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers)); if (perf->uct.peers == NULL) { goto err_free; } ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE; ep_params.iface = perf->uct.iface; if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); if (status != UCS_OK) { ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status)); goto err_destroy_eps; } status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr); if 
(status != UCS_OK) { ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status)); goto err_destroy_eps; } } } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR | UCT_EP_PARAM_FIELD_IFACE_ADDR; } vec[0].iov_base = &info; vec[0].iov_len = sizeof(info); vec[1].iov_base = buffer; vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len + info.uct.iface_addr_len + info.uct.ep_addr_len; rte_call(perf, post_vec, vec, 2, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } rte_call(perf, recv, i, buffer, buffer_size, req); remote_info = buffer; rkey_buffer = remote_info + 1; dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size); iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len); ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len); perf->uct.peers[i].remote_addr = remote_info->recv_buffer; if (!uct_iface_is_reachable(perf->uct.iface, dev_addr, remote_info->uct.iface_addr_len ? 
iface_addr : NULL)) { ucs_error("Destination is unreachable"); status = UCS_ERR_UNREACHABLE; goto err_destroy_eps; } if (remote_info->rkey_size > 0) { status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer, &perf->uct.peers[i].rkey); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status)); goto err_destroy_eps; } } else { perf->uct.peers[i].rkey.handle = NULL; perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY; } if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr); } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.dev_addr = dev_addr; ep_params.iface_addr = iface_addr; status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); } else { status = UCS_ERR_UNSUPPORTED; } if (status != UCS_OK) { ucs_error("Failed to connect endpoint: %s", ucs_status_string(status)); goto err_destroy_eps; } } uct_perf_iface_flush_b(perf); free(buffer); uct_perf_barrier(perf); return UCS_OK; err_destroy_eps: for (i = 0; i < group_size; ++i) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep != NULL) { uct_ep_destroy(perf->uct.peers[i].ep); } } free(perf->uct.peers); err_free: free(buffer); err: return status; } static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { unsigned group_size, group_index, i; uct_perf_barrier(perf); uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0); group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); for (i = 0; i < group_size; ++i) { if (i != group_index) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep) { uct_ep_destroy(perf->uct.peers[i].ep); } } } free(perf->uct.peers); } static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params, ucp_params_t 
*ucp_params) { ucs_status_t status; size_t message_size; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_PUT: case UCX_PERF_CMD_GET: ucp_params->features |= UCP_FEATURE_RMA; break; case UCX_PERF_CMD_ADD: case UCX_PERF_CMD_FADD: case UCX_PERF_CMD_SWAP: case UCX_PERF_CMD_CSWAP: if (message_size == sizeof(uint32_t)) { ucp_params->features |= UCP_FEATURE_AMO32; } else if (message_size == sizeof(uint64_t)) { ucp_params->features |= UCP_FEATURE_AMO64; } else { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Atomic size should be either 32 or 64 bit"); } return UCS_ERR_INVALID_PARAM; } break; case UCX_PERF_CMD_TAG: case UCX_PERF_CMD_TAG_SYNC: ucp_params->features |= UCP_FEATURE_TAG; break; case UCX_PERF_CMD_STREAM: ucp_params->features |= UCP_FEATURE_STREAM; break; case UCX_PERF_CMD_AM: ucp_params->features |= UCP_FEATURE_AM; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } if ((params->flags & UCX_PERF_TEST_FLAG_WAKEUP) || (params->wait_mode == UCX_PERF_WAIT_MODE_SLEEP)) { ucp_params->features |= UCP_FEATURE_WAKEUP; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype, size_t iovcnt, unsigned thread_count, ucp_dt_iov_t **iov_p) { ucp_dt_iov_t *iov; if (UCP_PERF_DATATYPE_IOV == datatype) { iov = malloc(sizeof(*iov) * iovcnt * thread_count); if (NULL == iov) { ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt); return UCS_ERR_NO_MEMORY; } *iov_p = iov; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length, void **address_p, ucp_mem_h *memh, int non_blk_flag) { ucp_mem_map_params_t mem_map_params; ucp_mem_attr_t mem_attr; ucs_status_t status; mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS | UCP_MEM_MAP_PARAM_FIELD_LENGTH | 
UCP_MEM_MAP_PARAM_FIELD_FLAGS; mem_map_params.address = *address_p; mem_map_params.length = length; mem_map_params.flags = UCP_MEM_MAP_ALLOCATE; if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) { mem_map_params.flags |= non_blk_flag; } status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh); if (status != UCS_OK) { goto err; } mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS; status = ucp_mem_query(*memh, &mem_attr); if (status != UCS_OK) { goto err; } *address_p = mem_attr.address; return UCS_OK; err: return status; } static void ucp_perf_test_free_host(const ucx_perf_context_t *perf, void *address, ucp_mem_h memh) { ucs_status_t status; status = ucp_mem_unmap(perf->ucp.context, memh); if (status != UCS_OK) { ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status)); } } static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; size_t buffer_size; if (params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* Allocate send buffer memory */ perf->send_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->send_buffer, &perf->ucp.send_memh, UCP_MEM_MAP_NONBLOCK); if (status != UCS_OK) { goto err; } /* Allocate receive buffer memory */ perf->recv_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->recv_buffer, &perf->ucp.recv_memh, 0); if (status != UCS_OK) { goto err_free_send_buffer; } /* Allocate AM header */ if (params->ucp.am_hdr_size != 0) { perf->ucp.am_hdr = malloc(params->ucp.am_hdr_size); if (perf->ucp.am_hdr == NULL) { goto err_free_buffers; } } else { perf->ucp.am_hdr = NULL; } /* Allocate IOV datatype memory */ perf->ucp.send_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.send_iov); if (UCS_OK 
!= status) { goto err_free_am_hdr; } perf->ucp.recv_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.recv_iov); if (UCS_OK != status) { goto err_free_send_iov_buffers; } return UCS_OK; err_free_send_iov_buffers: free(perf->ucp.send_iov); err_free_am_hdr: free(perf->ucp.am_hdr); err_free_buffers: perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); err_free_send_buffer: perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); err: return UCS_ERR_NO_MEMORY; } static void ucp_perf_test_free_mem(ucx_perf_context_t *perf) { free(perf->ucp.recv_iov); free(perf->ucp.send_iov); free(perf->ucp.am_hdr); perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); } static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf) { unsigned i, thread_count = perf->params.thread_count; ucs_status_ptr_t *req; ucs_status_t status; for (i = 0; i < thread_count; ++i) { if (perf->ucp.tctx[i].perf.ucp.rkey != NULL) { ucp_rkey_destroy(perf->ucp.tctx[i].perf.ucp.rkey); } if (perf->ucp.tctx[i].perf.ucp.ep != NULL) { req = ucp_ep_close_nb(perf->ucp.tctx[i].perf.ucp.ep, UCP_EP_CLOSE_MODE_FLUSH); if (UCS_PTR_IS_PTR(req)) { do { ucp_worker_progress(perf->ucp.tctx[i].perf.ucp.worker); status = ucp_request_check_status(req); } while (status == UCS_INPROGRESS); ucp_request_release(req); } else if (UCS_PTR_STATUS(req) != UCS_OK) { ucs_warn("failed to close ep %p on thread %d: %s\n", perf->ucp.tctx[i].perf.ucp.ep, i, ucs_status_string(UCS_PTR_STATUS(req))); } } } } static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf, ucs_status_t status) { unsigned group_size = rte_call(perf, group_size); ucs_status_t collective_status = status; struct iovec vec; void *req = NULL; unsigned i; vec.iov_base = &status; vec.iov_len = sizeof(status); rte_call(perf, post_vec, &vec, 1, &req); 
rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { rte_call(perf, recv, i, &status, sizeof(status), req); if (status != UCS_OK) { collective_status = status; } } return collective_status; } static void ucp_perf_test_err_handler(void *arg, ucp_ep_h ep, ucs_status_t status) { ucs_error("error handler called with status %d (%s)\n", status, ucs_status_string(status)); } static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf) { unsigned thread_count = perf->params.thread_count; void *rkey_buffer = NULL; void *req = NULL; unsigned group_size, group_index, i; ucx_perf_ep_info_t *remote_info; ucp_ep_params_t ep_params; ucp_address_t *address; ucs_status_t status; size_t buffer_size; void *buffer; group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); if (group_size != 2) { ucs_error("perftest requires group size to be exactly 2 " "(actual group size: %u)", group_size); return UCS_ERR_UNSUPPORTED; } buffer_size = ADDR_BUF_SIZE * thread_count; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("failed to allocate RTE receive buffer"); status = UCS_ERR_NO_MEMORY; goto err; } /* Initialize all endpoints and rkeys to NULL to handle error flow */ for (i = 0; i < thread_count; i++) { perf->ucp.tctx[i].perf.ucp.ep = NULL; perf->ucp.tctx[i].perf.ucp.rkey = NULL; } /* receive the data from the remote peer, extract the address from it * (along with additional wireup info) and create an endpoint to the peer */ rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req); remote_info = buffer; for (i = 0; i < thread_count; i++) { address = (ucp_address_t*)(remote_info + 1); rkey_buffer = UCS_PTR_BYTE_OFFSET(address, remote_info->ucp.worker_addr_len); perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer; ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS; ep_params.address = address; if (perf->params.flags & UCX_PERF_TEST_FLAG_ERR_HANDLING) { ep_params.field_mask |= 
UCP_EP_PARAM_FIELD_ERR_HANDLER | UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE; ep_params.err_handler.cb = ucp_perf_test_err_handler; ep_params.err_handler.arg = NULL; ep_params.err_mode = UCP_ERR_HANDLING_MODE_PEER; } status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params, &perf->ucp.tctx[i].perf.ucp.ep); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status)); } goto err_free_eps_buffer; } if (remote_info->rkey_size > 0) { status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer, &perf->ucp.tctx[i].perf.ucp.rkey); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status)); } goto err_free_eps_buffer; } } else { perf->ucp.tctx[i].perf.ucp.rkey = NULL; } remote_info = UCS_PTR_BYTE_OFFSET(remote_info, remote_info->ucp.total_wireup_len); } free(buffer); return UCS_OK; err_free_eps_buffer: ucp_perf_test_destroy_eps(perf); free(buffer); err: return status; } static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf, uint64_t features) { unsigned i, j, thread_count = perf->params.thread_count; size_t address_length = 0; void *rkey_buffer = NULL; void *req = NULL; ucx_perf_ep_info_t *info; ucp_address_t *address; ucs_status_t status; struct iovec *vec; size_t rkey_size; if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh, &rkey_buffer, &rkey_size); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status)); } goto err; } } else { rkey_size = 0; } /* each thread has an iovec with 3 entries to send to the remote peer: * ep_info, worker_address and rkey buffer */ vec = calloc(3 * thread_count, sizeof(struct iovec)); if (vec == NULL) { ucs_error("failed to allocate iovec"); status = 
UCS_ERR_NO_MEMORY; goto err_rkey_release; } /* get the worker address created for every thread and send it to the remote * peer */ for (i = 0; i < thread_count; i++) { status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker, &address, &address_length); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status)); } goto err_free_workers_vec; } vec[i * 3].iov_base = malloc(sizeof(*info)); if (vec[i * 3].iov_base == NULL) { ucs_error("failed to allocate vec entry for info"); status = UCS_ERR_NO_MEMORY; ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); goto err_free_workers_vec; } info = vec[i * 3].iov_base; info->ucp.worker_addr_len = address_length; info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size; info->rkey_size = rkey_size; info->recv_buffer = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer; vec[(i * 3) + 0].iov_len = sizeof(*info); vec[(i * 3) + 1].iov_base = address; vec[(i * 3) + 1].iov_len = address_length; vec[(i * 3) + 2].iov_base = rkey_buffer; vec[(i * 3) + 2].iov_len = info->rkey_size; address_length = 0; } /* send to the remote peer */ rte_call(perf, post_vec, vec, 3 * thread_count, &req); rte_call(perf, exchange_vec, req); if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { ucp_rkey_buffer_release(rkey_buffer); } for (i = 0; i < thread_count; i++) { free(vec[i * 3].iov_base); ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker, vec[(i * 3) + 1].iov_base); } free(vec); return UCS_OK; err_free_workers_vec: for (j = 0; j < i; j++) { ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); } free(vec); err_rkey_release: if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { ucp_rkey_buffer_release(rkey_buffer); } err: return status; } static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf, uint64_t features) { ucs_status_t status; unsigned i; /* pack the local 
endpoints data and send to the remote peer */ status = ucp_perf_test_send_local_data(perf, features); if (status != UCS_OK) { goto err; } /* receive remote peer's endpoints' data and connect to them */ status = ucp_perf_test_receive_remote_data(perf); if (status != UCS_OK) { goto err; } /* sync status across all processes */ status = ucp_perf_test_exchange_status(perf, UCS_OK); if (status != UCS_OK) { goto err_destroy_eps; } /* force wireup completion */ for (i = 0; i < perf->params.thread_count; i++) { status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker); if (status != UCS_OK) { ucs_warn("ucp_worker_flush() failed on thread %d: %s", i, ucs_status_string(status)); } } return status; err_destroy_eps: ucp_perf_test_destroy_eps(perf); err: (void)ucp_perf_test_exchange_status(perf, status); return status; } static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { ucp_perf_barrier(perf); ucp_perf_test_destroy_eps(perf); } static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf) { unsigned i; for (i = 0; i < perf->params.thread_count; i++) { if (perf->ucp.tctx[i].perf.ucp.worker != NULL) { ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); } } } static void ucx_perf_set_warmup(ucx_perf_context_t* perf, const ucx_perf_params_t* params) { perf->max_iter = ucs_min(params->warmup_iter, ucs_div_round_up(params->max_iter, 10)); perf->report_interval = ULONG_MAX; } static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf) { uct_component_h *uct_components; uct_component_attr_t component_attr; uct_tl_resource_desc_t *tl_resources; unsigned md_index, num_components; unsigned tl_index, num_tl_resources; unsigned cmpt_index; ucs_status_t status; uct_md_h md; uct_md_config_t *md_config; status = uct_query_components(&uct_components, &num_components); if (status != UCS_OK) { goto out; } for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) { component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT; status = 
uct_component_query(uct_components[cmpt_index], &component_attr); if (status != UCS_OK) { goto out_release_components_list; } component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES; component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) * component_attr.md_resource_count); status = uct_component_query(uct_components[cmpt_index], &component_attr); if (status != UCS_OK) { goto out_release_components_list; } for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) { status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL, &md_config); if (status != UCS_OK) { goto out_release_components_list; } ucs_strncpy_zero(perf->params.uct.md_name, component_attr.md_resources[md_index].md_name, UCT_MD_NAME_MAX); status = uct_md_open(uct_components[cmpt_index], component_attr.md_resources[md_index].md_name, md_config, &md); uct_config_release(md_config); if (status != UCS_OK) { goto out_release_components_list; } status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources); if (status != UCS_OK) { uct_md_close(md); goto out_release_components_list; } for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) { if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) && !strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name)) { uct_release_tl_resource_list(tl_resources); perf->uct.cmpt = uct_components[cmpt_index]; perf->uct.md = md; status = UCS_OK; goto out_release_components_list; } } uct_md_close(md); uct_release_tl_resource_list(tl_resources); } } ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT, UCT_PERF_TEST_PARAMS_ARG(&perf->params)); status = UCS_ERR_NO_DEVICE; out_release_components_list: uct_release_component_list(uct_components); out: return status; } void uct_perf_barrier(ucx_perf_context_t *perf) { rte_call(perf, barrier, (void(*)(void*))uct_worker_progress, (void*)perf->uct.worker); } void ucp_perf_barrier(ucx_perf_context_t *perf) { rte_call(perf, barrier, 
(void(*)(void*))ucp_worker_progress, #if _OPENMP (void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker); #else (void*)perf->ucp.tctx[0].perf.ucp.worker); #endif } static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; uct_iface_config_t *iface_config; ucs_status_t status; uct_iface_params_t iface_params = { .field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE | UCT_IFACE_PARAM_FIELD_STATS_ROOT | UCT_IFACE_PARAM_FIELD_RX_HEADROOM | UCT_IFACE_PARAM_FIELD_CPU_MASK, .open_mode = UCT_IFACE_OPEN_MODE_DEVICE, .mode.device.tl_name = params->uct.tl_name, .mode.device.dev_name = params->uct.dev_name, .stats_root = ucs_stats_get_root(), .rx_headroom = 0 }; UCS_CPU_ZERO(&iface_params.cpu_mask); status = ucs_async_context_init(&perf->uct.async, params->async_mode); if (status != UCS_OK) { goto out; } status = uct_worker_create(&perf->uct.async, params->thread_mode, &perf->uct.worker); if (status != UCS_OK) { goto out_cleanup_async; } status = uct_perf_create_md(perf); if (status != UCS_OK) { goto out_destroy_worker; } status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL, NULL, &iface_config); if (status != UCS_OK) { goto out_destroy_md; } status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params, iface_config, &perf->uct.iface); uct_config_release(iface_config); if (status != UCS_OK) { ucs_error("Failed to open iface: %s", ucs_status_string(status)); goto out_destroy_md; } status = uct_perf_test_check_capabilities(params, perf->uct.iface, perf->uct.md); /* sync status across all processes */ status = ucp_perf_test_exchange_status(perf, status); if (status != UCS_OK) { goto out_iface_close; } status = uct_perf_test_alloc_mem(perf); if (status != UCS_OK) { goto out_iface_close; } /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called * to give a chance to finish connection for some transports (ib/ud, tcp). 
* They may return UCS_INPROGRESS from `uct_iface_flush` when connections are * in progress */ uct_iface_progress_enable(perf->uct.iface, UCT_PROGRESS_SEND | UCT_PROGRESS_RECV); status = uct_perf_test_setup_endpoints(perf); if (status != UCS_OK) { ucs_error("Failed to setup endpoints: %s", ucs_status_string(status)); goto out_free_mem; } return UCS_OK; out_free_mem: uct_perf_test_free_mem(perf); out_iface_close: uct_iface_close(perf->uct.iface); out_destroy_md: uct_md_close(perf->uct.md); out_destroy_worker: uct_worker_destroy(perf->uct.worker); out_cleanup_async: ucs_async_context_cleanup(&perf->uct.async); out: return status; } static void uct_perf_cleanup(ucx_perf_context_t *perf) { uct_perf_test_cleanup_endpoints(perf); uct_perf_test_free_mem(perf); uct_iface_close(perf->uct.iface); uct_md_close(perf->uct.md); uct_worker_destroy(perf->uct.worker); ucs_async_context_cleanup(&perf->uct.async); } static void ucp_perf_request_init(void *req) { ucp_perf_request_t *request = req; request->context = NULL; } static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf) { ucp_params_t ucp_params; ucp_worker_params_t worker_params; ucp_worker_attr_t worker_attr; ucp_config_t *config; ucs_status_t status; unsigned i, thread_count; size_t message_size; ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES | UCP_PARAM_FIELD_REQUEST_SIZE | UCP_PARAM_FIELD_REQUEST_INIT; ucp_params.features = 0; ucp_params.request_size = sizeof(ucp_perf_request_t); ucp_params.request_init = ucp_perf_request_init; if (perf->params.thread_count > 1) { /* when there is more than one thread, a ucp_worker would be created for * each. 
all of them will share the same ucp_context */ ucp_params.features |= UCP_PARAM_FIELD_MT_WORKERS_SHARED; ucp_params.mt_workers_shared = 1; } status = ucp_perf_test_fill_params(&perf->params, &ucp_params); if (status != UCS_OK) { goto err; } status = ucp_config_read(NULL, NULL, &config); if (status != UCS_OK) { goto err; } status = ucp_init(&ucp_params, config, &perf->ucp.context); ucp_config_release(config); if (status != UCS_OK) { goto err; } thread_count = perf->params.thread_count; message_size = ucx_perf_get_message_size(&perf->params); status = ucp_perf_test_alloc_mem(perf); if (status != UCS_OK) { ucs_warn("ucp test failed to allocate memory"); goto err_cleanup; } perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t)); if (perf->ucp.tctx == NULL) { ucs_warn("ucp test failed to allocate memory for thread contexts"); goto err_free_mem; } worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE; worker_params.thread_mode = perf->params.thread_mode; for (i = 0; i < thread_count; i++) { perf->ucp.tctx[i].tid = i; perf->ucp.tctx[i].perf = *perf; /* Doctor the src and dst buffers to make them thread specific */ perf->ucp.tctx[i].perf.send_buffer = UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size); perf->ucp.tctx[i].perf.recv_buffer = UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size); status = ucp_worker_create(perf->ucp.context, &worker_params, &perf->ucp.tctx[i].perf.ucp.worker); if (status != UCS_OK) { goto err_free_tctx_destroy_workers; } } if (perf->params.command == UCX_PERF_CMD_AM) { /* Check that requested AM header size is not larger than max supported. 
*/ worker_attr.field_mask = UCP_WORKER_ATTR_FIELD_MAX_AM_HEADER; status = ucp_worker_query(perf->ucp.tctx[0].perf.ucp.worker, &worker_attr); if (status != UCS_OK) { goto err_free_tctx_destroy_workers; } if (worker_attr.max_am_header < perf->params.ucp.am_hdr_size) { ucs_error("AM header size (%zu) is larger than max supported (%zu)", perf->params.ucp.am_hdr_size, worker_attr.max_am_header); status = UCS_ERR_INVALID_PARAM; goto err_free_tctx_destroy_workers; } } status = ucp_perf_test_setup_endpoints(perf, ucp_params.features); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Failed to setup endpoints: %s", ucs_status_string(status)); } goto err_free_tctx_destroy_workers; } return UCS_OK; err_free_tctx_destroy_workers: ucp_perf_test_destroy_workers(perf); free(perf->ucp.tctx); err_free_mem: ucp_perf_test_free_mem(perf); err_cleanup: ucp_cleanup(perf->ucp.context); err: return status; } static void ucp_perf_cleanup(ucx_perf_context_t *perf) { ucp_perf_test_cleanup_endpoints(perf); ucp_perf_barrier(perf); ucp_perf_test_free_mem(perf); ucp_perf_test_destroy_workers(perf); free(perf->ucp.tctx); ucp_cleanup(perf->ucp.context); } static struct { ucs_status_t (*setup)(ucx_perf_context_t *perf); void (*cleanup)(ucx_perf_context_t *perf); ucs_status_t (*run)(ucx_perf_context_t *perf); void (*barrier)(ucx_perf_context_t *perf); } ucx_perf_funcs[] = { [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup, uct_perf_test_dispatch, uct_perf_barrier}, [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup, ucp_perf_test_dispatch, ucp_perf_barrier} }; static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result); ucs_status_t ucx_perf_run(const ucx_perf_params_t *params, ucx_perf_result_t *result) { ucx_perf_context_t *perf; ucs_status_t status; ucx_perf_global_init(); if (params->command == UCX_PERF_CMD_LAST) { ucs_error("Test is not selected"); status = UCS_ERR_INVALID_PARAM; goto out; } if ((params->api 
!= UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) { ucs_error("Invalid test API parameter (should be UCT or UCP)"); status = UCS_ERR_INVALID_PARAM; goto out; } perf = malloc(sizeof(*perf)); if (perf == NULL) { status = UCS_ERR_NO_MEMORY; goto out; } ucx_perf_test_init(perf, params); if (perf->allocator == NULL) { ucs_error("Unsupported memory types %s<->%s", ucs_memory_type_names[params->send_mem_type], ucs_memory_type_names[params->recv_mem_type]); status = UCS_ERR_UNSUPPORTED; goto out_free; } if ((params->api == UCX_PERF_API_UCT) && (perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) { ucs_warn("UCT tests also copy 2-byte values from %s memory to " "%s memory, which may impact performance results", ucs_memory_type_names[perf->allocator->mem_type], ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]); } status = perf->allocator->init(perf); if (status != UCS_OK) { goto out_free; } status = ucx_perf_funcs[params->api].setup(perf); if (status != UCS_OK) { goto out_free; } if (params->thread_count == 1) { if (params->api == UCX_PERF_API_UCP) { perf->ucp.worker = perf->ucp.tctx[0].perf.ucp.worker; perf->ucp.ep = perf->ucp.tctx[0].perf.ucp.ep; perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr; perf->ucp.rkey = perf->ucp.tctx[0].perf.ucp.rkey; } if (params->warmup_iter > 0) { ucx_perf_set_warmup(perf, params); status = ucx_perf_funcs[params->api].run(perf); if (status != UCS_OK) { goto out_cleanup; } ucx_perf_funcs[params->api].barrier(perf); ucx_perf_test_prepare_new_run(perf, params); } /* Run test */ status = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); if (status == UCS_OK) { ucx_perf_calc_result(perf, result); rte_call(perf, report, result, perf->params.report_arg, 1, 0); } } else { status = ucx_perf_thread_spawn(perf, result); } out_cleanup: ucx_perf_funcs[params->api].cleanup(perf); out_free: free(perf); out: return status; } #if _OPENMP static ucs_status_t ucx_perf_thread_run_test(void* arg) { 
ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */ ucx_perf_result_t* result = &tctx->result; ucx_perf_context_t* perf = &tctx->perf; ucx_perf_params_t* params = &perf->params; ucs_status_t status; /* new threads need explicit device association */ status = perf->allocator->init(perf); if (status != UCS_OK) { goto out; } if (params->warmup_iter > 0) { ucx_perf_set_warmup(perf, params); status = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); if (UCS_OK != status) { goto out; } ucx_perf_test_prepare_new_run(perf, params); } /* Run test */ #pragma omp barrier status = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); if (UCS_OK != status) { goto out; } ucx_perf_calc_result(perf, result); out: return status; } static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf) { ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */ unsigned i, thread_count = perf->params.thread_count; double lat_sum_total_avegare = 0.0; ucx_perf_result_t agg_result; agg_result.iters = tctx[0].result.iters; agg_result.bytes = tctx[0].result.bytes; agg_result.elapsed_time = tctx[0].result.elapsed_time; agg_result.bandwidth.total_average = 0.0; agg_result.bandwidth.typical = 0.0; /* Undefined since used only for latency calculations */ agg_result.latency.total_average = 0.0; agg_result.msgrate.total_average = 0.0; agg_result.msgrate.typical = 0.0; /* Undefined since used only for latency calculations */ /* when running with multiple threads, the moment average value is * undefined since we don't capture the values of the last iteration */ agg_result.msgrate.moment_average = 0.0; agg_result.bandwidth.moment_average = 0.0; agg_result.latency.moment_average = 0.0; agg_result.latency.typical = 0.0; /* in case of multiple threads, we have to aggregate the results so that the * final output of the result would show the performance 
numbers that were * collected from all the threads. * BW and message rate values will be the sum of their values from all * the threads, while the latency value is the average latency from the * threads. */ for (i = 0; i < thread_count; i++) { agg_result.bandwidth.total_average += tctx[i].result.bandwidth.total_average; agg_result.msgrate.total_average += tctx[i].result.msgrate.total_average; lat_sum_total_avegare += tctx[i].result.latency.total_average; } agg_result.latency.total_average = lat_sum_total_avegare / thread_count; rte_call(perf, report, &agg_result, perf->params.report_arg, 1, 1); } static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */ int ti, thread_count = perf->params.thread_count; ucs_status_t* statuses; ucs_status_t status; omp_set_num_threads(thread_count); statuses = calloc(thread_count, sizeof(ucs_status_t)); if (statuses == NULL) { status = UCS_ERR_NO_MEMORY; goto out; } #pragma omp parallel private(ti) { ti = omp_get_thread_num(); tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]); } status = UCS_OK; for (ti = 0; ti < thread_count; ti++) { if (UCS_OK != tctx[ti].status) { ucs_error("Thread %d failed to run test: %s", tctx[ti].tid, ucs_status_string(tctx[ti].status)); status = tctx[ti].status; } } ucx_perf_thread_report_aggregated_results(perf); free(statuses); out: return status; } #else static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)"); return UCS_ERR_INVALID_PARAM; } #endif /* _OPENMP */ void ucx_perf_global_init() { static ucx_perf_allocator_t host_allocator = { .mem_type = UCS_MEMORY_TYPE_HOST, .init = ucs_empty_function_return_success, .ucp_alloc = ucp_perf_test_alloc_host, .ucp_free = ucp_perf_test_free_host, .uct_alloc = uct_perf_test_alloc_host, .uct_free = 
uct_perf_test_free_host, .memcpy = ucx_perf_test_memcpy_host, .memset = memset }; UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest); ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator; /* FIXME Memtype allocator modules must be loaded to global scope, otherwise * alloc hooks, which are using dlsym() to get pointer to original function, * do not work. Need to use bistro for memtype hooks to fix it. */ UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL); }
/* ===== begin concatenated file: softmax-inl.h ===== */
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file softmax-inl.h * \brief */ #ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #define MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #include <algorithm> #include <string> #include <utility> #include <vector> #include <type_traits> #include "../mxnet_op.h" #include "../operator_common.h" #include "../tensor/broadcast_reduce_op.h" #include "../../common/cuda_utils.h" namespace mxnet { namespace op { namespace mxnet_op { struct softmax_fwd { template<typename AType> MSHADOW_XINLINE static AType Map(float a, AType b) { return AType(expf(a)/b); } template<typename AType> MSHADOW_XINLINE static AType Map(double a, AType b) { return AType(exp(a)/b); } }; struct log_softmax_fwd { template<typename DType> MSHADOW_XINLINE static float Map(DType a, float b) { return a - logf(b); } template<typename DType> MSHADOW_XINLINE static double Map(DType a, double b) { return a - log(b); } }; template<typename OP, bool negate, typename AType, typename DType, typename OType, typename IType, int ndim> inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; if (M == 0) return; index_t N = 
shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; if (length == nullptr) { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < M; ++j) { val = negate ? -in[base + j*sa] : in[base + j*sa]; if (mmax < val) mmax = val; } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp((in_val - mmax)/temperature); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum); } } } } else { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t len = static_cast<index_t>(length[i]); index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < len; ++j) { val = negate ? -in[base + j*sa] : in[base + j*sa]; if (mmax < val) mmax = val; } for (index_t j = len; j < M; ++j) { out[base + j*sa] = OType(0.0f); } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < len; ++j) { in_val = negate ? 
-in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp((in_val - mmax)/temperature); } for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum); } } } } } struct softmax_bwd { template<typename DType, typename AType> MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) { return AType(out * (ograd - sum)); } }; struct log_softmax_bwd { template<typename AType> MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) { return AType(ograd - expf(out)*sum); } template<typename AType> MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) { return AType(ograd - exp(out)*sum); } }; template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename DType, typename OType, typename IType, int ndim> inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd, DType *igrad, IType *length, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; if (M == 0) return; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; if (length != nullptr) { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); index_t len = static_cast<index_t>(length[i]); AType sum = AType(0); for (index_t j = 0; j < len; ++j) { sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]); } // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) : OP2::Map(ograd[base + j*sa], out[base + j*sa], sum); final_result = (j < len) ? 
final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature : OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature; final_result = (j < len) ? final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } } } else { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); AType sum = AType(0); for (index_t j = 0; j < M; ++j) { sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]); } // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) : OP2::Map(ograd[base + j*sa], out[base + j*sa], sum); KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature : OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature; KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } } } } #ifdef __CUDACC__ template<int x_bits, typename OP, bool negate, typename AType, int ndim, typename DType, typename OType, typename IType> __global__ void softmax_compute_kernel(DType *in, OType *out, IType *length, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, const double temperature) { const unsigned x_size = 1 << x_bits; __shared__ AType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t x = threadIdx.x; const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]); red::maximum::SetInitValue(smem[x]); for (index_t i = x; i < len; i += x_size) { smem[x] = ::max(smem[x], negate ? 
-in[base + i*sa] : in[base + i*sa]); } __syncthreads(); cuda::Reduce1D<red::maximum, x_bits>(smem); __syncthreads(); DType smax = smem[0]; __syncthreads(); red::sum::SetInitValue(smem[x]); DType val; for (index_t i = x; i < len; i += x_size) { val = negate ? -in[base + i*sa]:in[base + i*sa]; smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature))); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); for (index_t i = x; i < M; i += x_size) { val = negate ? -in[base + i*sa] : in[base + i*sa]; out[base + i*sa] = (i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f); } } const int softmax_threads_per_block = 512; template<typename OP, bool negate, typename AType, typename LType, typename DType, typename OType, typename IType> __global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length, const index_t M, const double temperature, const int rows_per_block, const index_t total_rows) { __shared__ AType scratch[softmax_threads_per_block]; __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)]; const int warp_size = 32; const int threads_per_row = softmax_threads_per_block / rows_per_block; const int my_local_row = threadIdx.x / threads_per_row; const int my_row = blockIdx.x * rows_per_block + my_local_row; if (my_row >= total_rows) return; const int my_id = threadIdx.x % threads_per_row; const int entries_per_load = sizeof(LType)/sizeof(DType); const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]); // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating // kernels where sizeof(LType) may be less than sizeof(DType), // resulting in entries_per_load being 0. // This is not a valid combination and is being checked against // in the launcher code. This switch here is just to silence // the division by zero warning generated for such invalid cases. 
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0; const LType* in_aligned = reinterpret_cast<const LType*>(in); size_t base = my_row * row_length; for (index_t i = my_id; i < row_length; i += threads_per_row) { persistent_storage[my_local_row * row_length + i] = in_aligned[base + i]; } DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length); __syncthreads(); DType my_max_value; red::maximum::SetInitValue(my_max_value); for (index_t i = my_id; i < len; i += threads_per_row) { my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]); } scratch[threadIdx.x] = my_max_value; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]); } __syncthreads(); } if (my_id < warp_size) { AType my_value = warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return ::max(x, y); }); scratch[threadIdx.x] = my_value; } __syncthreads(); DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); AType my_sum; red::sum::SetInitValue(my_sum); for (index_t i = my_id; i < len; i += threads_per_row) { const DType val = negate ? -row[i] : row[i]; my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature))); } scratch[threadIdx.x] = my_sum; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] += scratch[threadIdx.x + size]; } __syncthreads(); } if (my_id < warp_size) { AType my_value = warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y;}); scratch[threadIdx.x] = my_value; } __syncthreads(); AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); for (index_t i = my_id; i < M; i += threads_per_row) { const DType val = negate ? -row[i] : row[i]; row[i] = (i < len) ? 
DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : DType(0.0f); } __syncthreads(); LType* out_aligned = reinterpret_cast<LType*>(out); for (index_t i = my_id; i < row_length; i += threads_per_row) { out_aligned[base + i] = persistent_storage[my_local_row * row_length + i]; } } template<typename OP, bool negate, typename AType, typename DType, typename OType, typename IType, int ndim> inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length, Shape<ndim> shape, int axis, const double temperature) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = shape[axis]; if (M == 0 || shape.Size() == 0) return; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; const size_t DSize = sizeof(DType); // Using 20 kB of shared memory for persistent storage in the optimized case const size_t max_opt_M = 20 * 1024 / DSize; if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M && std::is_same<DType, OType>::value) { int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType)); MXNET_LOAD_TYPE_SWITCH(ltype, LType, { int rows_per_block = mxnet::common::cuda::get_rows_per_block(M * sizeof(DType) / sizeof(LType), softmax_threads_per_block); int nblocks = (N + rows_per_block - 1) / rows_per_block; CHECK_LE(sizeof(DType), sizeof(LType)); softmax_stride1_compute_kernel<OP, negate, AType, LType> <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>( in, out, length, M, temperature, rows_per_block, N); }); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel); } else { softmax_compute_kernel<x_bits, OP, negate, AType, ndim> <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>( in, out, length, M, axis, sshape, stride, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel); } } template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType, typename DType, typename OType, typename IType> 
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd, DType *igrad, const IType *length, const index_t M, const double temperature, const int rows_per_block, const index_t total_rows) { __shared__ AType scratch[softmax_threads_per_block]; __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)]; const int warp_size = 32; const int threads_per_row = softmax_threads_per_block / rows_per_block; const int my_local_row = threadIdx.x / threads_per_row; const int my_row = blockIdx.x * rows_per_block + my_local_row; if (my_row >= total_rows) return; const int my_id = threadIdx.x % threads_per_row; const int entries_per_load = sizeof(LType)/sizeof(DType); const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]); // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating // kernels where sizeof(LType) may be less than sizeof(DType), // resulting in entries_per_load being 0. // This is not a valid combination and is being checked against // in the launcher code. This switch here is just to silence // the division by zero warning generated for such invalid cases. const int row_length = entries_per_load > 0 ? 
M / entries_per_load : 0; const LType* out_aligned = reinterpret_cast<const LType*>(out); const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd); size_t base = my_row * row_length; for (index_t i = my_id; i < row_length; i += threads_per_row) { persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i]; persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i]; } DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2); __syncthreads(); AType my_sum_value; red::sum::SetInitValue(my_sum_value); for (index_t i = my_id; i < len; i += threads_per_row) { my_sum_value += OP1::Map(row[i + M], row[i]); } scratch[threadIdx.x] = my_sum_value; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size]; } __syncthreads(); } if (my_id < warp_size) { AType my_value = warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y; }); scratch[threadIdx.x] = my_value; } __syncthreads(); AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); for (index_t i = my_id; i < M; i += threads_per_row) { const DType val = negate ? -OP2::Map(row[i + M], row[i], ssum) : OP2::Map(row[i + M], row[i], ssum); row[i] = (i < len) ? 
DType(val / static_cast<DType>(temperature)) : DType(0.0f); if (Req == kAddTo) { row[i] += igrad[my_row * M + i]; } } __syncthreads(); LType* igrad_aligned = reinterpret_cast<LType*>(igrad); for (index_t i = my_id; i < row_length; i += threads_per_row) { igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i]; } } template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType, typename IType> __global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad, const IType *length, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, const double temperature) { const unsigned x_size = 1 << x_bits; __shared__ AType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t x = threadIdx.x; index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M; red::sum::SetInitValue(smem[x]); for (index_t i = x; i < len; i += x_size) { smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); DType final_result; for (index_t i = x; i < M; i += x_size) { final_result = negate ? -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) : OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum); final_result = (i < len) ? 
final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature)); } } template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType, typename IType> inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd, DType *igrad, IType *length, Shape<ndim> shape, int axis, const double temperature) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = shape[axis]; if (M == 0 || shape.Size() == 0) return; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; const size_t DSize = sizeof(DType); // Using 20 kB of shared memory for persistent storage in the optimized case // Need to store both out and ograd, so M can be only half compared to // forward pass. const size_t max_opt_M = 20 * 1024 / DSize / 2; if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M && std::is_same<DType, OType>::value) { int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType)); MXNET_LOAD_TYPE_SWITCH(ltype, LType, { int rows_per_block = mxnet::common::cuda::get_rows_per_block(M * sizeof(DType) / sizeof(LType), softmax_threads_per_block); int nblocks = (N + rows_per_block - 1) / rows_per_block; CHECK_LE(sizeof(DType), sizeof(LType)); softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType> <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, length, M, temperature, rows_per_block, N); }); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel); } else { softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim> <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, length, M, axis, sshape, stride, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel); } } #endif } // namespace mxnet_op struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> { int axis; dmlc::optional<double> temperature; 
dmlc::optional<int> dtype; dmlc::optional<bool> use_length; DMLC_DECLARE_PARAMETER(SoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1) .describe("The axis along which to compute softmax."); DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>()) .describe("Temperature parameter in softmax"); DMLC_DECLARE_FIELD(dtype) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(dmlc::optional<int>()) .describe("DType of the output in case this can't be inferred. " "Defaults to the same as input's dtype if not defined (dtype=None)."); DMLC_DECLARE_FIELD(use_length) .set_default(dmlc::optional<bool>(false)) .describe("Whether to use the length input as a mask over the data input."); } bool operator==(const SoftmaxParam& other) const { return this->axis == other.axis && this->temperature == other.temperature && this->dtype == other.dtype && this->use_length == other.use_length; } }; static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.dtype.has_value() && param.dtype.value() != -1; } static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.use_length.value(); } static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 
2U : 1U);
  // (Tail of SoftmaxOpType; its opening CHECK_EQ lines are above this view.)
  if (softmax_has_dtype_override(attrs)) {
    // An explicit output dtype was requested: pin the output, then
    // propagate it back onto the data input.
    TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
    type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
    return true;
  } else {
    // No override: output dtype simply mirrors the data input's dtype.
    std::vector<int> tmp = {in_attrs->at(0)};
    return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
  }
}

/* Shape inference for the softmax forward op.
   Output shape equals the data input's shape.  When use_length is set,
   a second "length" input is expected whose shape is the data shape with
   the softmax axis removed (one length per softmax row).  */
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
  if (param.use_length.value()) {
    // Build the expected length shape: data shape minus the softmax axis.
    // A 1-D data input degenerates to a 1-element length shape.
    mxnet::TShape& dshape = in_attrs->at(0);
    mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
    int j = 0;
    // NOTE(review): only axis == -1 is remapped to the last axis here;
    // other negative axes would be used as-is — presumably normalized
    // earlier, verify against callers.
    int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
    for (int i = 0; i < dshape.ndim(); ++i) {
      if (i != axis) {
        tmp_shape[j++] = dshape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
  }
  // Output shape is elementwise-identical to the data input.
  mxnet::ShapeVector tmp = {in_attrs->at(0)};
  return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}

/* Shape inference for the softmax backward op.
   Input layout depends on the attrs (see SoftmaxGradOpInputNames):
   - dtype override or use_length: {ograd, data, [length,] output} with the
     data gradient (and length gradient, if any) as outputs;
   - plain case: {ograd, output} -> {data grad}.  */
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                      mxnet::ShapeVector *in_attrs,
                                      mxnet::ShapeVector *out_attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      // ograd (0), data (1) and output (3) must all agree elementwise
      // with the data gradient; the length input (2) pairs with the
      // length gradient output (1).
      mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
      mxnet::ShapeVector dgrad = {out_attrs->at(0)};
      bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
      SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
      mxnet::ShapeVector length = {in_attrs->at(2)};
      mxnet::ShapeVector lgrad = {out_attrs->at(1)};
      res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
      SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
      return res;
    } else {
      return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
    }
  } else {
    return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
  }
}
/* Type inference for the softmax backward op.
   In the dtype-override / use_length case the inputs are
   {ograd, data, [length,] output}: the data gradient takes the data
   input's dtype, while ograd is forced to match the forward output's
   dtype.  In the plain case {ograd, output} everything shares one dtype.  */
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
    // in_attrs[1] is the original data input; the forward output sits at
    // index 3 when a length input is present, index 2 otherwise.
    int in_dtype = (*in_attrs)[1];
    int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
    if (softmax_use_length(attrs)) {
      // The length gradient shares the length input's (integer) dtype.
      TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
    }
    // Inference succeeds only once every participating dtype is resolved.
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
           (!softmax_use_length(attrs) ||
            ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1));
  } else {
    CHECK_EQ(in_attrs->size(), 2U);
    // Plain case: ograd, output and the data gradient all share the
    // forward output's dtype.
    int out_dtype = (*in_attrs)[1];
    TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
  }
}

/* In-place (input, output) sharing hints for the backward op.
   Each pair says input i may reuse output j's memory; the extra pairs in
   the override/length variants cover the additional inputs.  */
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
    } else {
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
    }
  } else {
    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
  }
}

/* Number of inputs the backward op consumes; must stay consistent with
   SoftmaxGradOpInputNames and the index arithmetic in SoftmaxGradOpType.  */
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    return softmax_use_length(attrs) ?
4 : 3; } return 2; } static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { return std::vector<std::string>{"ograd", "data", "length", "output"}; } else { return std::vector<std::string>{"ograd", "data", "output"}; } } else { return std::vector<std::string>{"ograd", "output"}; } } struct SoftmaxFGradient { const char *op_name; std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) const { if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) { return ElemwiseGradUseInOut {op_name}(n, ograds); } else { return ElemwiseGradUseOut {op_name}(n, ograds); } } }; template<typename xpu, typename OP, bool negate = false> void SoftmaxCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp || inputs[0].Size() == 0U) return; CHECK_NE(req[0], kAddTo); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false); if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) { common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. 
" "See https://mxnet.apache.org/api/faq/env_var " "for more details."); } MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, { int type = kInt32; if (param.use_length.value()) { CHECK(inputs.size() > 1) << "Mask needs to be provided when using softmax with use_length=True."; type = inputs[1].type_flag_; } MXNET_INT32_INT64_TYPE_SWITCH(type, IType, { IType* mask_ptr = nullptr; if (param.use_length.value()) { mask_ptr = inputs[1].dptr<IType>(); } if (safe_acc) { if (shape.ndim() == 2) { Softmax<OP, negate, AType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { Softmax<OP, negate, AType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } else { if (shape.ndim() == 2) { Softmax<OP, negate, DType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { Softmax<OP, negate, DType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } }); }); }); } template<typename xpu, typename OP1, typename OP2, bool negate = false> void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (softmax_use_length(attrs)) { MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, { if (req[1] != kNullOp) { mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch( ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>()); } }); } if (req[0] == kNullOp) return; const int itype = softmax_use_length(attrs) ? 
inputs[2].type_flag_ : kInt32; const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1; out_idx = softmax_use_length(attrs) ? 3 : out_idx; bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false); MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, { IType * length_ptr = nullptr; if (softmax_use_length(attrs)) { length_ptr = inputs[2].dptr<IType>(); } if (safe_acc) { if (shape.ndim() == 2) { SoftmaxGrad<OP1, OP2, Req, negate, AType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { SoftmaxGrad<OP1, OP2, Req, negate, AType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } else { if (shape.ndim() == 2) { SoftmaxGrad<OP1, OP2, Req, negate, DType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { SoftmaxGrad<OP1, OP2, Req, negate, DType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } }); }); }); }); } } // namespace op } // namespace mxnet namespace std { template<> struct hash<mxnet::op::SoftmaxParam> { size_t operator()(const mxnet::op::SoftmaxParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axis); ret = 
dmlc::HashCombine(ret, val.temperature); ret = dmlc::HashCombine(ret, val.dtype); ret = dmlc::HashCombine(ret, val.use_length); return ret; } }; } // namespace std #endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
BubbleFreeN50.h
///////////////////////////////////////////////////////////////////////////////
//                 SOFTWARE COPYRIGHT NOTICE AGREEMENT                        //
//       This software and its documentation are copyright (2012) by the     //
//   Broad Institute.  All rights are reserved.  This software is supplied   //
//   without any warranty or guaranteed support whatsoever. The Broad        //
//   Institute is not responsible for its use, misuse, or functionality.     //
///////////////////////////////////////////////////////////////////////////////
//
//  Author: Neil Weisenfeld - Mar 10, 2014 - <crdhelp@broadinstitute.org>
//
// MakeDepend: library OMP
// MakeDepend: cflags OMP_FLAGS

#ifndef BUBBLEFREEN50_H_
#define BUBBLEFREEN50_H_

#include "paths/HyperBasevector.h"

// Computes edge-count and N50 statistics of an assembly graph before and
// after a simplification pass that (a) collapses two-edge "bubbles" between
// the same vertex pair into a single edge of averaged length, (b) joins
// simple non-branching paths, and (c) drops edges shorter than min_len.
// All work happens on a lightweight copy (digraphE<int> of kmer lengths);
// the input HyperBasevector is not modified.
class BubbleFreeN50 {
public:
    // hbv:     graph whose edge kmer-lengths are analyzed (read-only).
    // min_len: edges whose base-space length (kmers + K - 1) falls below
    //          this are deleted before the "post" stats are taken.
    explicit BubbleFreeN50( HyperBasevector const& hbv, int min_len = 0 ) {
        // Mirror the graph topology with each edge carrying its kmer length.
        vec<int> new_edges( hbv.EdgeObjectCount( ) );
        for ( int e = 0; e < hbv.EdgeObjectCount( ); e++ )
            new_edges[e] = hbv.EdgeLengthKmers(e);
        digraphE<int> size_graph( hbv, new_edges );
        // report original stats
        mPreNEdges = size_graph.Edges().size();
        mPreN50 = N50( size_graph.Edges() );
        // pop bubbles: a vertex with exactly two out-edges to the same
        // successor is a bubble; keep one arm at the average length and
        // mark the other for deletion.
        vec<int> to_delete;
        to_delete.reserve(size_graph.N());
        // NOTE(review): '#pragma omp for' is a worksharing construct with no
        // enclosing 'parallel' visible here.  As written it runs serially,
        // but if this constructor were ever invoked inside a parallel
        // region, the unguarded to_delete.push_back() and the edge mutation
        // would be data races — confirm intent or drop the pragma.
#pragma omp for
        for ( int vi = 0; vi < size_graph.N(); ++vi ) {
            if ( size_graph.FromSize(vi) == 2
                    && size_graph.From(vi)[0] == size_graph.From(vi)[1] ) {
                int iedge0 = size_graph.EdgeObjectIndexByIndexFrom(vi,1);
                int iedge1 = size_graph.EdgeObjectIndexByIndexFrom(vi,0);
                int sum = size_graph.EdgeObject(iedge0)
                        + size_graph.EdgeObject(iedge1);
                // change edge 0, delete edge 1 (surviving arm gets the
                // average of the two arm lengths)
                size_graph.EdgeObjectMutable(iedge0) = sum / 2;
                to_delete.push_back( iedge1 );
            }
        }
        UniqueSort(to_delete);
        size_graph.DeleteEdges(to_delete);
        // cout << "cleanup..." << endl;
        // Join simple paths: a vertex with one in- and one out-edge (and no
        // self-loop) is dissolved, merging its two edges' lengths.
        for ( int i = 0; i < size_graph.N(); ++i ) {
            if ( size_graph.From(i).size() == 1 && size_graph.To(i).size() == 1
                    && size_graph.From(i)[0] != i ) {
                size_graph.JoinEdges(i, size_graph.EdgeObjectByIndexTo(i,0)
                        + size_graph.EdgeObjectByIndexFrom(i,0) );
            }
        }
        // Drop edges shorter than min_len in base space (kmers + K - 1).
        vec<int> del2;
        for ( int e = 0; e < size_graph.EdgeObjectCount( ); e++ )
            if ( size_graph.EdgeObject(e) + hbv.K( ) - 1 < min_len )
                del2.push_back(e);
        size_graph.DeleteEdges(del2);
        // Clear out edges that have been removed from the graph
        // and vertices with no edges.
        size_graph.RemoveEdgelessVertices( );
        size_graph.RemoveDeadEdgeObjects( );
        // report N50 (converted from kmer length to base length via K - 1)
        mPostNEdges = size_graph.Edges().size();
        mPostN50 = N50( size_graph.Edges() ) + hbv.K( ) - 1;
    }

    // Accessors for the statistics captured at construction time.
    int PreN50() { return mPreN50; }
    int PostN50() { return mPostN50; }
    size_t PreNEdges() { return mPreNEdges; }
    // NOTE(review): casing is inconsistent with PreNEdges ("Nedges" vs
    // "NEdges"); renaming would break callers, so it is only flagged here.
    size_t PostNedges() { return mPostNEdges; }

private:
    int mPreN50;        // N50 of edge kmer-lengths before simplification
    int mPostN50;       // N50 in base space after simplification
    size_t mPreNEdges;  // edge count before simplification
    size_t mPostNEdges; // edge count after simplification
};

#endif /* BUBBLEFREEN50_H_ */
cpplex.c
/* Copyright 2003, 2004, 2005, 2006 PathScale, Inc. All Rights Reserved. File modified October 3, 2003 by PathScale, Inc. to update Open64 C/C++ front-ends to GNU 3.3.1 release. */ /* CPP Library - lexical analysis. Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc. Contributed by Per Bothner, 1994-95. Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 Broken out to separate file, Zack Weinberg, Mar 2000 Single-pass line tokenization by Neil Booth, April 2000 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "config.h" #include "system.h" #include "cpplib.h" #include "cpphash.h" #ifdef MULTIBYTE_CHARS #include "mbchar.h" #include <locale.h> #endif #ifdef SGI_MONGOOSE // To get TARGET_*, WCHAR_UNSIGNED #include "defaults.h" #endif /* SGI_MONGOOSE */ /* Tokens with SPELL_STRING store their spelling in the token list, and it's length in the token->val.name.len. 
*/ enum spell_type { SPELL_OPERATOR = 0, SPELL_CHAR, SPELL_IDENT, SPELL_NUMBER, SPELL_STRING, SPELL_NONE }; struct token_spelling { enum spell_type category; const unsigned char *name; }; static const unsigned char *const digraph_spellings[] = { U"%:", U"%:%:", U"<:", U":>", U"<%", U"%>" }; #define OP(e, s) { SPELL_OPERATOR, U s }, #define TK(e, s) { s, U STRINGX (e) }, static const struct token_spelling token_spellings[N_TTYPES] = { TTYPE_TABLE }; #undef OP #undef TK #define TOKEN_SPELL(token) (token_spellings[(token)->type].category) #define TOKEN_NAME(token) (token_spellings[(token)->type].name) #define BACKUP() do {buffer->cur = buffer->backup_to;} while (0) static void handle_newline PARAMS ((cpp_reader *)); static cppchar_t skip_escaped_newlines PARAMS ((cpp_reader *)); static cppchar_t get_effective_char PARAMS ((cpp_reader *)); static int skip_block_comment PARAMS ((cpp_reader *)); static int skip_line_comment PARAMS ((cpp_reader *)); static void adjust_column PARAMS ((cpp_reader *)); static int skip_whitespace PARAMS ((cpp_reader *, cppchar_t)); static cpp_hashnode *parse_identifier PARAMS ((cpp_reader *)); static uchar *parse_slow PARAMS ((cpp_reader *, const uchar *, int, unsigned int *)); static void parse_number PARAMS ((cpp_reader *, cpp_string *, int)); static int unescaped_terminator_p PARAMS ((cpp_reader *, const uchar *)); static void parse_string PARAMS ((cpp_reader *, cpp_token *, cppchar_t)); static bool trigraph_p PARAMS ((cpp_reader *)); static void save_comment PARAMS ((cpp_reader *, cpp_token *, const uchar *, cppchar_t)); static bool continue_after_nul PARAMS ((cpp_reader *)); static int name_p PARAMS ((cpp_reader *, const cpp_string *)); static int maybe_read_ucs PARAMS ((cpp_reader *, const unsigned char **, const unsigned char *, cppchar_t *)); static tokenrun *next_tokenrun PARAMS ((tokenrun *)); static unsigned int hex_digit_value PARAMS ((unsigned int)); static _cpp_buff *new_buff PARAMS ((size_t)); /* Utility routine: Compares, the 
token TOKEN to the NUL-terminated string STRING.  TOKEN must be a
   CPP_NAME.  Returns 1 for equal, 0 for unequal.  */
int
cpp_ideq (token, string)
     const cpp_token *token;
     const char *string;
{
  /* Only identifier tokens carry a hash node to compare against.  */
  if (token->type != CPP_NAME)
    return 0;

  return !ustrcmp (NODE_NAME (token->val.node), (const uchar *) string);
}

#ifdef KEY
/* Lexer-visible state for OpenMP pragma handling; presumably set while
   tokenizing inside an "#pragma omp" line — verify against the users of
   these flags.  */
extern bool in_omp_pragma;
bool seen_omp_paren = FALSE;
#endif

/* Call when meeting a newline, assumed to be in buffer->cur[-1].
   Returns with buffer->cur pointing to the character immediately
   following the newline (combination).  */
static void
handle_newline (pfile)
     cpp_reader *pfile;
{
  cpp_buffer *buffer = pfile->buffer;

  /* Handle CR-LF and LF-CR.  Most other implementations (e.g. java)
     only accept CR-LF; maybe we should fall back to that behavior?  */
  if (buffer->cur[-1] + buffer->cur[0] == '\r' + '\n')
    buffer->cur++;

  /* Reset column tracking and advance the logical line counter.  */
  buffer->line_base = buffer->cur;
  buffer->col_adjust = 0;
  pfile->line++;
}

/* Subroutine of skip_escaped_newlines; called when a 3-character
   sequence beginning with "??" is encountered.  buffer->cur points
   to the second '?'.

   Warn if necessary, and returns true if the sequence forms a
   trigraph and the trigraph should be honored.  */
static bool
trigraph_p (pfile)
     cpp_reader *pfile;
{
  cpp_buffer *buffer = pfile->buffer;
  cppchar_t from_char = buffer->cur[1];
  bool accept;

  /* Not a character that completes any "??x" trigraph.  */
  if (!_cpp_trigraph_map[from_char])
    return false;

  /* Trigraphs are only honored when enabled on the command line.  */
  accept = CPP_OPTION (pfile, trigraphs);

  /* Don't warn about trigraphs in comments.  */
  if (CPP_OPTION (pfile, warn_trigraphs) && !pfile->state.lexing_comment)
    {
      if (accept)
        cpp_error_with_line (pfile, DL_WARNING, pfile->line,
                             CPP_BUF_COL (buffer) - 1,
                             "trigraph ??%c converted to %c",
                             (int) from_char,
                             (int) _cpp_trigraph_map[from_char]);
      /* last_Wtrigraphs suppresses duplicate "ignored" warnings for the
         same buffer position.  */
      else if (buffer->cur != buffer->last_Wtrigraphs)
        {
          buffer->last_Wtrigraphs = buffer->cur;
          cpp_error_with_line (pfile, DL_WARNING, pfile->line,
                               CPP_BUF_COL (buffer) - 1,
                               "trigraph ??%c ignored", (int) from_char);
        }
    }

  return accept;
}

/* Skips any escaped newlines introduced by '?'
or a '\\', assumed to lie in buffer->cur[-1].  Returns the next
   byte, which will be in buffer->cur[-1].  This routine performs
   preprocessing stages 1 and 2 of the ISO C standard.  */
static cppchar_t
skip_escaped_newlines (pfile)
     cpp_reader *pfile;
{
  cpp_buffer *buffer = pfile->buffer;
  cppchar_t next = buffer->cur[-1];

  /* Only do this if we apply stages 1 and 2.  */
  if (!buffer->from_stage3)
    {
      const unsigned char *saved_cur;
      cppchar_t next1;

      /* Loop: each iteration consumes one trigraph and/or one
         backslash-newline splice, until the next real character.  */
      do
        {
          if (next == '?')
            {
              /* "?x" or a trigraph the user has not enabled: leave it.  */
              if (buffer->cur[0] != '?' || !trigraph_p (pfile))
                break;

              /* Translate the trigraph.  */
              next = _cpp_trigraph_map[buffer->cur[1]];
              buffer->cur += 2;
              /* Only "??/" (== '\\') can start a line splice.  */
              if (next != '\\')
                break;
            }

          if (buffer->cur == buffer->rlimit)
            break;

          /* We have a backslash, and room for at least one more
             character.  Skip horizontal whitespace.  */
          saved_cur = buffer->cur;
          do
            next1 = *buffer->cur++;
          while (is_nvspace (next1) && buffer->cur < buffer->rlimit);

          /* Backslash not followed by (whitespace then) a newline: it was
             a plain backslash after all — rewind and return it.  */
          if (!is_vspace (next1))
            {
              buffer->cur = saved_cur;
              break;
            }

          /* Whitespace between the backslash and the newline is accepted
             but warned about (outside comments).  */
          if (saved_cur != buffer->cur - 1
              && !pfile->state.lexing_comment)
            cpp_error (pfile, DL_WARNING,
                       "backslash and newline separated by space");

          handle_newline (pfile);
          buffer->backup_to = buffer->cur;
          if (buffer->cur == buffer->rlimit)
            {
              cpp_error (pfile, DL_PEDWARN,
                         "backslash-newline at end of file");
              next = EOF;
            }
          else
            next = *buffer->cur++;
        }
      while (next == '\\' || next == '?');
    }

  return next;
}

/* Obtain the next character, after trigraph conversion and skipping
   an arbitrarily long string of escaped newlines.  The common case of
   no trigraphs or escaped newlines falls through quickly.  On return,
   buffer->backup_to points to where to return to if the character is
   not to be processed.  */
static cppchar_t
get_effective_char (pfile)
     cpp_reader *pfile;
{
  cppchar_t next;
  cpp_buffer *buffer = pfile->buffer;

  buffer->backup_to = buffer->cur;
  next = *buffer->cur++;
  /* '?' and '\\' are the only bytes that can begin a trigraph or a
     line splice; everything else is returned directly.  */
  if (__builtin_expect (next == '?' || next == '\\', 0))
    next = skip_escaped_newlines (pfile);

  return next;
}

/* Skip a C-style block comment.
We find the end of the comment by seeing if an asterisk is before every '/'
   we encounter.  Returns nonzero if comment terminated by EOF, zero
   otherwise.  */
static int
skip_block_comment (pfile)
     cpp_reader *pfile;
{
  cpp_buffer *buffer = pfile->buffer;
  cppchar_t c = EOF, prevc = EOF;

  pfile->state.lexing_comment = 1;
  while (buffer->cur != buffer->rlimit)
    {
      prevc = c, c = *buffer->cur++;

      /* FIXME: For speed, create a new character class of characters
         of interest inside block comments.  */
      if (c == '?' || c == '\\')
        c = skip_escaped_newlines (pfile);

      /* People like decorating comments with '*', so check for '/'
         instead for efficiency.  */
      if (c == '/')
        {
          if (prevc == '*')
            break;

          /* Warn about potential nested comments, but not if the '/'
             comes immediately before the true comment delimiter.
             Don't bother to get it right across escaped newlines.  */
          if (CPP_OPTION (pfile, warn_comments)
              && buffer->cur[0] == '*' && buffer->cur[1] != '/')
            cpp_error_with_line (pfile, DL_WARNING,
                                 pfile->line, CPP_BUF_COL (buffer),
                                 "\"/*\" within comment");
        }
      else if (is_vspace (c))
        handle_newline (pfile);
      else if (c == '\t')
        adjust_column (pfile);
    }

  pfile->state.lexing_comment = 0;
  /* Terminated by "*" "/" => 0; ran off the buffer => nonzero.  */
  return c != '/' || prevc != '*';
}

/* Skip a C++ line comment, leaving buffer->cur pointing to the
   terminating newline.  Handles escaped newlines.  Returns nonzero
   if a multiline comment.  */
static int
skip_line_comment (pfile)
     cpp_reader *pfile;
{
  cpp_buffer *buffer = pfile->buffer;
  unsigned int orig_line = pfile->line;
  cppchar_t c;
#ifdef MULTIBYTE_CHARS
  wchar_t wc;
  int char_len;
#endif

  pfile->state.lexing_comment = 1;
#ifdef MULTIBYTE_CHARS
  /* Reset multibyte conversion state.  */
  (void) local_mbtowc (NULL, NULL, 0);
#endif
  do
    {
      if (buffer->cur == buffer->rlimit)
        goto at_eof;

#ifdef MULTIBYTE_CHARS
      char_len = local_mbtowc (&wc, (const char *) buffer->cur,
                               buffer->rlimit - buffer->cur);
      if (char_len == -1)
        {
          cpp_error (pfile, DL_WARNING,
                     "ignoring invalid multibyte character");
          char_len = 1;
          c = *buffer->cur++;
        }
      else
        {
          buffer->cur += char_len;
          c = wc;
        }
#else
      c = *buffer->cur++;
#endif
      if (c == '?' || c == '\\')
        c = skip_escaped_newlines (pfile);
    }
  while (!is_vspace (c));

  /* Step back over the newline, except at EOF.  */
  buffer->cur--;
 at_eof:

  pfile->state.lexing_comment = 0;
  /* An escaped newline inside the comment bumped pfile->line.  */
  return orig_line != pfile->line;
}

/* pfile->buffer->cur is one beyond the \t character.  Update
   col_adjust so we track the column correctly.  */
static void
adjust_column (pfile)
     cpp_reader *pfile;
{
  cpp_buffer *buffer = pfile->buffer;
  unsigned int col = CPP_BUF_COL (buffer) - 1; /* Zero-based column.  */

  /* Round it up to multiple of the tabstop, but subtract 1 since the
     tab itself occupies a character position.  */
  buffer->col_adjust += (CPP_OPTION (pfile, tabstop)
                         - col % CPP_OPTION (pfile, tabstop)) - 1;
}

/* Skips whitespace, saving the next non-whitespace character.
   Adjusts pfile->col_adjust to account for tabs.  Without this,
   tokens might be assigned an incorrect column.  Returns 0 only when
   the terminating NUL at the end of the buffer is reached.  */
static int
skip_whitespace (pfile, c)
     cpp_reader *pfile;
     cppchar_t c;
{
  cpp_buffer *buffer = pfile->buffer;
  unsigned int warned = 0;

  do
    {
      /* Horizontal space always OK.  */
      if (c == ' ')
        ;
      else if (c == '\t')
        adjust_column (pfile);
      /* Just \f \v or \0 left.  */
      else if (c == '\0')
        {
          if (buffer->cur - 1 == buffer->rlimit)
            return 0;
          if (!warned)
            {
              cpp_error (pfile, DL_WARNING, "null character(s) ignored");
              warned = 1;
            }
        }
      else if (pfile->state.in_directive && CPP_PEDANTIC (pfile))
        cpp_error_with_line (pfile, DL_PEDWARN, pfile->line,
                             CPP_BUF_COL (buffer),
                             "%s in preprocessing directive",
                             c == '\f' ? "form feed" : "vertical tab");

      c = *buffer->cur++;
    }
  /* We only want non-vertical space, i.e. ' ' \t \f \v \0.  */
  while (is_nvspace (c));

  buffer->cur--;
  return 1;
}

/* See if the characters of a number token are valid in a name (no
   '.', '+' or '-').  */
static int
name_p (pfile, string)
     cpp_reader *pfile;
     const cpp_string *string;
{
  unsigned int i;

  for (i = 0; i < string->len; i++)
    if (!is_idchar (string->text[i]))
      return 0;
  return 1;
}

/* Parse an identifier, skipping embedded backslash-newlines.  This is
   a critical inner loop.  The common case is an identifier which has
   not been split by backslash-newline, does not contain a dollar
   sign, and has already been scanned (roughly 10:1 ratio of
   seen:unseen identifiers in normal code; the distribution is
   Poisson-like).  Second most common case is a new identifier, not
   split and no dollar sign.  The other possibilities are rare and
   have been relegated to parse_slow.  */
static cpp_hashnode *
parse_identifier (pfile)
     cpp_reader *pfile;
{
  cpp_hashnode *result;
  const uchar *cur, *base;

  /* Fast-path loop.  Skim over a normal identifier.
     N.B. ISIDNUM does not include $.  */
  cur = pfile->buffer->cur;
  while (ISIDNUM (*cur))
    cur++;

  /* Check for slow-path cases.  */
  if (*cur == '?' || *cur == '\\' || *cur == '$')
    {
      unsigned int len;

      /* parse_slow copies the spelling into the obstack, so the
         result is already permanent (HT_ALLOCED).  */
      base = parse_slow (pfile, cur, 0, &len);
      result = (cpp_hashnode *)
        ht_lookup (pfile->hash_table, base, len, HT_ALLOCED);
    }
  else
    {
      base = pfile->buffer->cur - 1;
      pfile->buffer->cur = cur;
      result = (cpp_hashnode *)
        ht_lookup (pfile->hash_table, base, cur - base, HT_ALLOC);
    }

  /* Rarely, identifiers require diagnostics when lexed.
     XXX Has to be forced out of the fast path.  */
  if (__builtin_expect ((result->flags & NODE_DIAGNOSTIC)
                        && !pfile->state.skipping, 0))
    {
      /* It is allowed to poison the same identifier twice.  */
      if ((result->flags & NODE_POISONED) && !pfile->state.poisoned_ok)
        cpp_error (pfile, DL_ERROR, "attempt to use poisoned \"%s\"",
                   NODE_NAME (result));

      /* Constraint 6.10.3.5: __VA_ARGS__ should only appear in the
         replacement list of a variadic macro.  */
      if (result == pfile->spec_nodes.n__VA_ARGS__
          && !pfile->state.va_args_ok)
        cpp_error (pfile, DL_PEDWARN,
                   "__VA_ARGS__ can only appear in the expansion of a C99 variadic macro");
    }

  return result;
}

/* Slow path.  This handles numbers and identifiers which have been
   split, or contain dollar signs.  The part of the token from
   PFILE->buffer->cur-1 to CUR has already been scanned.  NUMBER_P is
   1 if it's a number, and 2 if it has a leading period.  Returns a
   pointer to the token's NUL-terminated spelling in permanent
   storage, and sets PLEN to its length.  */
static uchar *
parse_slow (pfile, cur, number_p, plen)
     cpp_reader *pfile;
     const uchar *cur;
     int number_p;
     unsigned int *plen;
{
  cpp_buffer *buffer = pfile->buffer;
  const uchar *base = buffer->cur - 1;
  struct obstack *stack = &pfile->hash_table->stack;
  unsigned int c, prevc, saw_dollar = 0;

  /* Place any leading period.  */
  if (number_p == 2)
    obstack_1grow (stack, '.');

  /* Copy the part of the token which is known to be okay.  */
  obstack_grow (stack, base, cur - base);

  /* Now process the part which isn't.  We are looking at one of
     '$', '\\', or '?' on entry to this loop.  */
  prevc = cur[-1];
  c = *cur++;
  buffer->cur = cur;
  for (;;)
    {
      /* Potential escaped newline?  */
      buffer->backup_to = buffer->cur - 1;
      if (c == '?' || c == '\\')
        c = skip_escaped_newlines (pfile);

      if (!is_idchar (c))
        {
          if (!number_p)
            break;
          /* For numbers, '.' and exponent signs (e.g. "e+") continue
             the pp-number.  */
          if (c != '.' && !VALID_SIGN (c, prevc))
            break;
        }

      /* Handle normal identifier characters in this loop.  */
      do
        {
          prevc = c;
          obstack_1grow (stack, c);

          if (c == '$')
            saw_dollar++;

          c = *buffer->cur++;
        }
      while (is_idchar (c));
    }

  /* Step back over the unwanted char.  */
  BACKUP ();

  /* $ is not an identifier character in the standard, but is commonly
     accepted as an extension.  Don't warn about it in skipped
     conditional blocks.  */
  if (saw_dollar && CPP_PEDANTIC (pfile) && ! pfile->state.skipping)
    cpp_error (pfile, DL_PEDWARN,
               "'$' character(s) in identifier or number");

  /* Identifiers and numbers are null-terminated.  */
  *plen = obstack_object_size (stack);
  obstack_1grow (stack, '\0');
  return obstack_finish (stack);
}

/* Parse a number, beginning with character C, skipping embedded
   backslash-newlines.  LEADING_PERIOD is nonzero if there was a "."
   before C.  Place the result in NUMBER.  */
static void
parse_number (pfile, number, leading_period)
     cpp_reader *pfile;
     cpp_string *number;
     int leading_period;
{
  const uchar *cur;

  /* Fast-path loop.  Skim over a normal number.
     N.B. ISIDNUM does not include $.  */
  cur = pfile->buffer->cur;
  while (ISIDNUM (*cur) || *cur == '.' || VALID_SIGN (*cur, cur[-1]))
    cur++;

  /* Check for slow-path cases.  */
  if (*cur == '?' || *cur == '\\' || *cur == '$')
    number->text = parse_slow (pfile, cur, 1 + leading_period,
                               &number->len);
  else
    {
      const uchar *base = pfile->buffer->cur - 1;
      uchar *dest;

      number->len = cur - base + leading_period;
      dest = _cpp_unaligned_alloc (pfile, number->len + 1);
      dest[number->len] = '\0';
      number->text = dest;

      if (leading_period)
        *dest++ = '.';
      memcpy (dest, base, cur - base);
      pfile->buffer->cur = cur;
    }
}

/* Subroutine of parse_string.  Returns nonzero if the terminator
   just stored at DEST is a real (unescaped) terminator.  */
static int
unescaped_terminator_p (pfile, dest)
     cpp_reader *pfile;
     const unsigned char *dest;
{
  const unsigned char *start, *temp;

  /* In #include-style directives, terminators are not escapeable.  */
  if (pfile->state.angled_headers)
    return 1;

  start = BUFF_FRONT (pfile->u_buff);

  /* An odd number of consecutive backslashes represents an escaped
     terminator.  */
  for (temp = dest; temp > start && temp[-1] == '\\'; temp--)
    ;

  return ((dest - temp) & 1) == 0;
}

/* Parses a string, character constant, or angle-bracketed header file
   name.  Handles embedded trigraphs and escaped newlines.  The stored
   string is guaranteed NUL-terminated, but it is not guaranteed that
   this is the first NUL since embedded NULs are preserved.

   When this function returns, buffer->cur points to the next
   character to be processed.
*/ static void parse_string (pfile, token, terminator) cpp_reader *pfile; cpp_token *token; cppchar_t terminator; { cpp_buffer *buffer = pfile->buffer; unsigned char *dest, *limit; cppchar_t c; bool warned_nulls = false; #ifdef MULTIBYTE_CHARS wchar_t wc; int char_len; #endif dest = BUFF_FRONT (pfile->u_buff); limit = BUFF_LIMIT (pfile->u_buff); #ifdef MULTIBYTE_CHARS /* Reset multibyte conversion state. */ (void) local_mbtowc (NULL, NULL, 0); #endif for (;;) { /* We need room for another char, possibly the terminating NUL. */ if ((size_t) (limit - dest) < 1) { size_t len_so_far = dest - BUFF_FRONT (pfile->u_buff); _cpp_extend_buff (pfile, &pfile->u_buff, 2); dest = BUFF_FRONT (pfile->u_buff) + len_so_far; limit = BUFF_LIMIT (pfile->u_buff); } #ifdef MULTIBYTE_CHARS char_len = local_mbtowc (&wc, (const char *) buffer->cur, buffer->rlimit - buffer->cur); if (char_len == -1) { cpp_error (pfile, DL_WARNING, "ignoring invalid multibyte character"); char_len = 1; c = *buffer->cur++; } else { buffer->cur += char_len; c = wc; } #else c = *buffer->cur++; #endif /* Handle trigraphs, escaped newlines etc. */ if (c == '?' || c == '\\') c = skip_escaped_newlines (pfile); if (c == terminator) { if (unescaped_terminator_p (pfile, dest)) break; } else if (is_vspace (c)) { /* No string literal may extend over multiple lines. In assembly language, suppress the error except for <> includes. This is a kludge around not knowing where comments are. 
*/ unterminated: if (CPP_OPTION (pfile, lang) != CLK_ASM || terminator == '>') cpp_error (pfile, DL_ERROR, "missing terminating %c character", (int) terminator); buffer->cur--; break; } else if (c == '\0') { if (buffer->cur - 1 == buffer->rlimit) goto unterminated; if (!warned_nulls) { warned_nulls = true; cpp_error (pfile, DL_WARNING, "null character(s) preserved in literal"); } } #ifdef MULTIBYTE_CHARS if (char_len > 1) { for ( ; char_len > 0; --char_len) *dest++ = (*buffer->cur - char_len); } else #endif *dest++ = c; } *dest = '\0'; token->val.str.text = BUFF_FRONT (pfile->u_buff); token->val.str.len = dest - BUFF_FRONT (pfile->u_buff); BUFF_FRONT (pfile->u_buff) = dest + 1; } /* The stored comment includes the comment start and any terminator. */ static void save_comment (pfile, token, from, type) cpp_reader *pfile; cpp_token *token; const unsigned char *from; cppchar_t type; { unsigned char *buffer; unsigned int len, clen; len = pfile->buffer->cur - from + 1; /* + 1 for the initial '/'. */ /* C++ comments probably (not definitely) have moved past a new line, which we don't want to save in the comment. */ if (is_vspace (pfile->buffer->cur[-1])) len--; /* If we are currently in a directive, then we need to store all C++ comments as C comments internally, and so we need to allocate a little extra space in that case. Note that the only time we encounter a directive here is when we are saving comments in a "#define". */ clen = (pfile->state.in_directive && type == '/') ? len + 2 : len; buffer = _cpp_unaligned_alloc (pfile, clen); token->type = CPP_COMMENT; token->val.str.len = clen; token->val.str.text = buffer; buffer[0] = '/'; memcpy (buffer + 1, from, len - 1); /* Finish conversion to a C comment, if necessary. */ if (pfile->state.in_directive && type == '/') { buffer[1] = '*'; buffer[clen - 2] = '*'; buffer[clen - 1] = '/'; } } /* Allocate COUNT tokens for RUN. 
*/ void _cpp_init_tokenrun (run, count) tokenrun *run; unsigned int count; { run->base = xnewvec (cpp_token, count); run->limit = run->base + count; run->next = NULL; } /* Returns the next tokenrun, or creates one if there is none. */ static tokenrun * next_tokenrun (run) tokenrun *run; { if (run->next == NULL) { run->next = xnew (tokenrun); run->next->prev = run; _cpp_init_tokenrun (run->next, 250); } return run->next; } /* Allocate a single token that is invalidated at the same time as the rest of the tokens on the line. Has its line and col set to the same as the last lexed token, so that diagnostics appear in the right place. */ cpp_token * _cpp_temp_token (pfile) cpp_reader *pfile; { cpp_token *old, *result; old = pfile->cur_token - 1; if (pfile->cur_token == pfile->cur_run->limit) { pfile->cur_run = next_tokenrun (pfile->cur_run); pfile->cur_token = pfile->cur_run->base; } result = pfile->cur_token++; result->line = old->line; result->col = old->col; return result; } #ifdef KEY enum pragma_type { INVALID, OMP, OPTIONS, UNROLL, #ifdef TARG_SL2 //fork_joint SL2, #endif EXEC_FREQ }; static enum pragma_type current_pragma = INVALID; bool last_token_omp_hash = FALSE; static cpp_token * _cpp_omp_token (cpp_reader * pfile) { cpp_buffer * buffer = pfile->buffer; cpp_token * result = NULL; const unsigned char *c = buffer->cur; const unsigned char *rlimit = buffer->rlimit; int len = 6; // strlen "pragma" while (*c == ' ' || *c == '\t') c++; if ((rlimit - c) < len || memcmp (c, "pragma", len)) return NULL; c += len; while (c < rlimit && (*c == ' ' || *c == '\t')) c++; len = 3; // now, strlen "omp" if ((rlimit - c) >= len && !memcmp (c, "omp", len)) { result = _cpp_lex_direct (pfile); // found #pragma omp current_pragma = OMP; } else if ((rlimit - c) >= strlen ("options") && !memcmp (c, "options", strlen ("options"))) { result = _cpp_lex_direct (pfile); current_pragma = OPTIONS; } else if (((rlimit - c) >= strlen ("mips_frequency_hint") && !memcmp (c, 
"mips_frequency_hint", strlen ("mips_frequency_hint"))) || ((rlimit - c) >= strlen ("frequency_hint") && !memcmp (c, "frequency_hint", strlen ("frequency_hint")))) { result = _cpp_lex_direct (pfile); current_pragma = EXEC_FREQ; } else if ((rlimit - c) >= strlen ("unroll") && !memcmp (c, "unroll", strlen ("unroll"))) { result = _cpp_lex_direct (pfile); current_pragma = UNROLL; } #ifdef TARG_SL //fork_joint else if((rlimit -c)>=strlen("sl2") && !memcmp(c, "sl2", strlen("sl2"))) { result = _cpp_lex_direct(pfile); current_pragma = SL2; } #endif return result; } // Skips the rest of the line till before the newline. Currently called // on seeing an OpenMP pragma when OpenMP is not enabled. static void skip_to_end_of_line (cpp_reader * pfile) { cpp_buffer * buffer = pfile->buffer; cppchar_t c; while (buffer->cur != buffer->rlimit) { c = *buffer->cur++; if (c == '\n') { // push back the newline into buffer buffer->cur--; break; } } } extern int flag_openmp; #endif // KEY /* Lex a token into RESULT (external interface). Takes care of issues like directive handling, token lookahead, multiple include optimization and skipping. 
 */
const cpp_token *
_cpp_lex_token (pfile)
     cpp_reader *pfile;
{
  cpp_token *result;

  for (;;)
    {
      if (pfile->cur_token == pfile->cur_run->limit)
        {
          pfile->cur_run = next_tokenrun (pfile->cur_run);
          pfile->cur_token = pfile->cur_run->base;
        }

      if (pfile->lookaheads)
        {
          pfile->lookaheads--;
          result = pfile->cur_token++;
        }
      else
        {
          result = _cpp_lex_direct (pfile);
#ifdef KEY
          /* Inside a recognized pragma, the newline terminates the
             pragma; hand the token straight back without directive
             processing.  */
          if (in_omp_pragma
              /* windows uses \r, linux uses \n */
              && (*(pfile->buffer->cur-1) == '\n'
                  || *(pfile->buffer->cur-1) == '\r'))
            return result;
#endif
        }

#ifdef KEY
      if ((result->flags & BOL) && result->type == CPP_HASH
          && pfile->state.parsing_args != 1)
        {
          // do a lookahead to find if it is OpenMP pragma
          cpp_token * omp_res = _cpp_omp_token (pfile);
          if (omp_res)
            {
              if (flag_openmp && current_pragma == OMP)
                {
                  last_token_omp_hash = TRUE;
                  return omp_res;
                }
              else if (current_pragma == OPTIONS
                       || current_pragma == UNROLL
                       || current_pragma == EXEC_FREQ)
                {
                  last_token_omp_hash = TRUE;
                  return omp_res;
                }
#ifdef TARG_SL
              //fork_joint
              else if (current_pragma == SL2)
                {
                  last_token_omp_hash = TRUE;
                  return omp_res;
                }
#endif
              else
                {
                  /* Recognized pragma family but not enabled
                     (e.g. "#pragma omp" without -mp): discard the
                     rest of the line.  */
                  skip_to_end_of_line (pfile);
                  continue;
                }
            }
        }
#endif // KEY

      if (result->flags & BOL)
        {
          /* Is this a directive.  If _cpp_handle_directive returns
             false, it is an assembler #.  */
          if (result->type == CPP_HASH
              /* 6.10.3 p 11: Directives in a list of macro arguments
                 gives undefined behavior.  This implementation
                 handles the directive as normal.  */
              && pfile->state.parsing_args != 1
              && _cpp_handle_directive (pfile, result->flags & PREV_WHITE))
            continue;
          if (pfile->cb.line_change && !pfile->state.skipping)
            (*pfile->cb.line_change)(pfile, result,
                                     pfile->state.parsing_args);
        }

      /* We don't skip tokens in directives.  */
      if (pfile->state.in_directive)
        break;

      /* Outside a directive, invalidate controlling macros.  At file
         EOF, _cpp_lex_direct takes care of popping the buffer, so we
         never get here and MI optimisation works.  */
      pfile->mi_valid = false;

      if (!pfile->state.skipping || result->type == CPP_EOF)
        break;
    }

  return result;
}

/* A NUL terminates the current buffer.  For ISO preprocessing this is
   EOF, but for traditional preprocessing it indicates we need a line
   refill.  Returns TRUE to continue preprocessing a new buffer, FALSE
   to return a CPP_EOF to the caller.  */
static bool
continue_after_nul (pfile)
     cpp_reader *pfile;
{
  cpp_buffer *buffer = pfile->buffer;
  bool more = false;

  buffer->saved_flags = BOL;
  if (CPP_OPTION (pfile, traditional))
    {
      if (pfile->state.in_directive)
        return false;

      _cpp_remove_overlay (pfile);
      more = _cpp_read_logical_line_trad (pfile);
      _cpp_overlay_buffer (pfile, pfile->out.base,
                           pfile->out.cur - pfile->out.base);
      pfile->line = pfile->out.first_line;
    }
  else
    {
      /* Stop parsing arguments with a CPP_EOF.  When we finally come
         back here, do the work of popping the buffer.  */
      if (!pfile->state.parsing_args)
        {
          if (buffer->cur != buffer->line_base)
            {
              /* Non-empty files should end in a newline.  Don't warn
                 for command line and _Pragma buffers.  */
              if (!buffer->from_stage3)
                cpp_error (pfile, DL_PEDWARN,
                           "no newline at end of file");
              handle_newline (pfile);
            }

          /* Similarly, finish an in-progress directive with CPP_EOF
             before popping the buffer.  */
          if (!pfile->state.in_directive && buffer->prev)
            {
              more = !buffer->return_at_eof;
              _cpp_pop_buffer (pfile);
            }
        }
    }

  return more;
}

/* Lex CHAR after the current character; on match the token is
   THEN_TYPE, otherwise back up and use ELSE_TYPE.  */
#define IF_NEXT_IS(CHAR, THEN_TYPE, ELSE_TYPE)		\
  do {							\
    if (get_effective_char (pfile) == CHAR)		\
      result->type = THEN_TYPE;				\
    else						\
      {							\
	BACKUP ();					\
	result->type = ELSE_TYPE;			\
      }							\
  } while (0)

/* Lex a token into pfile->cur_token, which is also incremented, to
   get diagnostics pointing to the correct location.

   Does not handle issues such as token lookahead, multiple-include
   optimisation, directives, skipping etc.  This function is only
   suitable for use by _cpp_lex_token, and in special cases like
   lex_expansion_token which doesn't care for any of these issues.
When meeting a newline, returns CPP_EOF if parsing a directive,
   otherwise returns to the start of the token buffer if permissible.
   Returns the location of the lexed token.  */
cpp_token *
_cpp_lex_direct (pfile)
     cpp_reader *pfile;
{
  cppchar_t c;
  cpp_buffer *buffer;
  const unsigned char *comment_start;
  cpp_token *result = pfile->cur_token++;

 fresh_line:
  buffer = pfile->buffer;
  result->flags = buffer->saved_flags;
  buffer->saved_flags = 0;
 update_tokens_line:
  result->line = pfile->line;

 skipped_white:
  c = *buffer->cur++;
  result->col = CPP_BUF_COLUMN (buffer, buffer->cur);

#ifdef KEY
  /* Inside a recognized pragma, a newline is returned as a 1-char
     CPP_NAME token so the pragma parser sees an explicit
     end-of-pragma marker.  */
  if (in_omp_pragma && (c == '\n' || c == '\r'))
    {
      buffer->saved_flags = BOL;
      result->type = CPP_NAME;
      result->val.node = (cpp_hashnode *)
        ht_lookup (pfile->hash_table, buffer->cur-1, 1, HT_ALLOC);
      return result;
    }
#endif

 trigraph:
  switch (c)
    {
    case ' ': case '\t': case '\f': case '\v': case '\0':
      result->flags |= PREV_WHITE;
      if (skip_whitespace (pfile, c))
        goto skipped_white;

      /* End of buffer.  */
      buffer->cur--;
      if (continue_after_nul (pfile))
        goto fresh_line;
      result->type = CPP_EOF;
      break;

    case '\n': case '\r':
      handle_newline (pfile);
      buffer->saved_flags = BOL;
      if (! pfile->state.in_directive)
        {
          if (pfile->state.parsing_args == 2)
            buffer->saved_flags |= PREV_WHITE;
          if (!pfile->keep_tokens)
            {
              /* Recycle the token run at each new logical line.  */
              pfile->cur_run = &pfile->base_run;
              result = pfile->base_run.base;
              pfile->cur_token = result + 1;
            }
          goto fresh_line;
        }
      result->type = CPP_EOF;
      break;

    case '?':
    case '\\':
      /* These could start an escaped newline, or '?' a trigraph.  Let
         skip_escaped_newlines do all the work.  */
      {
        unsigned int line = pfile->line;

        c = skip_escaped_newlines (pfile);
        if (line != pfile->line)
          {
            buffer->cur--;
            /* We had at least one escaped newline of some sort.
               Update the token's line and column.  */
            goto update_tokens_line;
          }
      }

      /* We are either the original '?' or '\\', or a trigraph.  */
      if (c == '?')
        result->type = CPP_QUERY;
      else if (c == '\\')
        goto random_char;
      else
        goto trigraph;
      break;

    case '0': case '1': case '2': case '3': case '4':
    case '5': case '6': case '7': case '8': case '9':
      result->type = CPP_NUMBER;
      parse_number (pfile, &result->val.str, 0);
      break;

    case 'L':
      /* 'L' may introduce wide characters or strings.  */
      {
        const unsigned char *pos = buffer->cur;

        c = get_effective_char (pfile);
        if (c == '\'' || c == '"')
          {
            result->type = (c == '"' ? CPP_WSTRING: CPP_WCHAR);
            parse_string (pfile, result, c);
            break;
          }
        buffer->cur = pos;
      }
      /* Fall through.  */

    start_ident:
    case '_':
    case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
    case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
    case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
    case 's': case 't': case 'u': case 'v': case 'w': case 'x':
    case 'y': case 'z':
    case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
    case 'G': case 'H': case 'I': case 'J': case 'K':
    case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
    case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
    case 'Y': case 'Z':
      /* 'L' is absent above: it is handled by the wide-literal case,
         which falls through to here when not followed by a quote.  */
      result->type = CPP_NAME;
      result->val.node = parse_identifier (pfile);

      /* Convert named operators to their proper types.  */
      if (result->val.node->flags & NODE_OPERATOR)
        {
          result->flags |= NAMED_OP;
          result->type = result->val.node->value.operator;
        }
      break;

    case '\'':
    case '"':
      result->type = c == '"' ? CPP_STRING: CPP_CHAR;
      parse_string (pfile, result, c);
      break;

    case '/':
      /* A potential block or line comment.  */
      comment_start = buffer->cur;
      c = get_effective_char (pfile);

      if (c == '*')
        {
          if (skip_block_comment (pfile))
            cpp_error (pfile, DL_ERROR, "unterminated comment");
        }
      else if (c == '/' && (CPP_OPTION (pfile, cplusplus_comments)
                            || CPP_IN_SYSTEM_HEADER (pfile)))
        {
          /* Warn about comments only if pedantically GNUC89, and not
             in system headers.  */
          if (CPP_OPTION (pfile, lang) == CLK_GNUC89
              && CPP_PEDANTIC (pfile)
              && ! buffer->warned_cplusplus_comments)
            {
              cpp_error (pfile, DL_PEDWARN,
                         "C++ style comments are not allowed in ISO C90");
              cpp_error (pfile, DL_PEDWARN,
                         "(this will be reported only once per input file)");
              buffer->warned_cplusplus_comments = 1;
            }

          if (skip_line_comment (pfile)
              && CPP_OPTION (pfile, warn_comments))
            cpp_error (pfile, DL_WARNING, "multi-line comment");
        }
      else if (c == '=')
        {
          result->type = CPP_DIV_EQ;
          break;
        }
      else
        {
          BACKUP ();
          result->type = CPP_DIV;
          break;
        }

      if (!pfile->state.save_comments)
        {
          result->flags |= PREV_WHITE;
          goto update_tokens_line;
        }

      /* Save the comment as a token in its own right.  */
      save_comment (pfile, result, comment_start, c);
      break;

    case '<':
      if (pfile->state.angled_headers)
        {
          result->type = CPP_HEADER_NAME;
          parse_string (pfile, result, '>');
          break;
        }

      c = get_effective_char (pfile);
      if (c == '=')
        result->type = CPP_LESS_EQ;
      else if (c == '<')
        IF_NEXT_IS ('=', CPP_LSHIFT_EQ, CPP_LSHIFT);
      else if (c == '?' && CPP_OPTION (pfile, cplusplus))
        IF_NEXT_IS ('=', CPP_MIN_EQ, CPP_MIN);
      else if (c == ':' && CPP_OPTION (pfile, digraphs))
        {
          result->type = CPP_OPEN_SQUARE;
          result->flags |= DIGRAPH;
        }
      else if (c == '%' && CPP_OPTION (pfile, digraphs))
        {
          result->type = CPP_OPEN_BRACE;
          result->flags |= DIGRAPH;
        }
      else
        {
          BACKUP ();
          result->type = CPP_LESS;
        }
      break;

    case '>':
      c = get_effective_char (pfile);
      if (c == '=')
        result->type = CPP_GREATER_EQ;
      else if (c == '>')
        IF_NEXT_IS ('=', CPP_RSHIFT_EQ, CPP_RSHIFT);
      else if (c == '?' && CPP_OPTION (pfile, cplusplus))
        IF_NEXT_IS ('=', CPP_MAX_EQ, CPP_MAX);
      else
        {
          BACKUP ();
          result->type = CPP_GREATER;
        }
      break;

    case '%':
      c = get_effective_char (pfile);
      if (c == '=')
        result->type = CPP_MOD_EQ;
      else if (CPP_OPTION (pfile, digraphs) && c == ':')
        {
          result->flags |= DIGRAPH;
          result->type = CPP_HASH;
          /* "%:%:" is the digraph spelling of "##".  */
          if (get_effective_char (pfile) == '%')
            {
              const unsigned char *pos = buffer->cur;

              if (get_effective_char (pfile) == ':')
                result->type = CPP_PASTE;
              else
                buffer->cur = pos - 1;
            }
          else
            BACKUP ();
        }
      else if (CPP_OPTION (pfile, digraphs) && c == '>')
        {
          result->flags |= DIGRAPH;
          result->type = CPP_CLOSE_BRACE;
        }
      else
        {
          BACKUP ();
          result->type = CPP_MOD;
        }
      break;

    case '.':
      result->type = CPP_DOT;
      c = get_effective_char (pfile);
      if (c == '.')
        {
          const unsigned char *pos = buffer->cur;

          if (get_effective_char (pfile) == '.')
            result->type = CPP_ELLIPSIS;
          else
            buffer->cur = pos - 1;
        }
      /* All known character sets have 0...9 contiguous.  */
      else if (ISDIGIT (c))
        {
          result->type = CPP_NUMBER;
          parse_number (pfile, &result->val.str, 1);
        }
      else if (c == '*' && CPP_OPTION (pfile, cplusplus))
        result->type = CPP_DOT_STAR;
      else
        BACKUP ();
      break;

    case '+':
      c = get_effective_char (pfile);
      if (c == '+')
        result->type = CPP_PLUS_PLUS;
      else if (c == '=')
        result->type = CPP_PLUS_EQ;
      else
        {
          BACKUP ();
          result->type = CPP_PLUS;
        }
      break;

    case '-':
      c = get_effective_char (pfile);
      if (c == '>')
        {
          result->type = CPP_DEREF;
          if (CPP_OPTION (pfile, cplusplus))
            {
              if (get_effective_char (pfile) == '*')
                result->type = CPP_DEREF_STAR;
              else
                BACKUP ();
            }
        }
      else if (c == '-')
        result->type = CPP_MINUS_MINUS;
      else if (c == '=')
        result->type = CPP_MINUS_EQ;
      else
        {
          BACKUP ();
          result->type = CPP_MINUS;
        }
      break;

    case '&':
      c = get_effective_char (pfile);
      if (c == '&')
        result->type = CPP_AND_AND;
      else if (c == '=')
        result->type = CPP_AND_EQ;
      else
        {
          BACKUP ();
          result->type = CPP_AND;
        }
      break;

    case '|':
      c = get_effective_char (pfile);
      if (c == '|')
        result->type = CPP_OR_OR;
      else if (c == '=')
        result->type = CPP_OR_EQ;
      else
        {
          BACKUP ();
          result->type = CPP_OR;
        }
      break;

    case ':':
      c = get_effective_char (pfile);
      if (c == ':' && CPP_OPTION (pfile, cplusplus))
        result->type = CPP_SCOPE;
      else if (c == '>' && CPP_OPTION (pfile, digraphs))
        {
          result->flags |= DIGRAPH;
          result->type = CPP_CLOSE_SQUARE;
        }
      else
        {
          BACKUP ();
          result->type = CPP_COLON;
        }
      break;

    case '*': IF_NEXT_IS ('=', CPP_MULT_EQ, CPP_MULT); break;
    case '=': IF_NEXT_IS ('=', CPP_EQ_EQ, CPP_EQ); break;
    case '!': IF_NEXT_IS ('=', CPP_NOT_EQ, CPP_NOT); break;
    case '^': IF_NEXT_IS ('=', CPP_XOR_EQ, CPP_XOR); break;
    case '#': IF_NEXT_IS ('#', CPP_PASTE, CPP_HASH); break;

    case '~': result->type = CPP_COMPL; break;
    case ',': result->type = CPP_COMMA; break;
    case '(': result->type = CPP_OPEN_PAREN;
#ifdef KEY
      if (in_omp_pragma)
        seen_omp_paren = TRUE;
#endif // KEY
      break;
    case ')': result->type = CPP_CLOSE_PAREN; break;
    case '[': result->type = CPP_OPEN_SQUARE; break;
    case ']': result->type = CPP_CLOSE_SQUARE; break;
    case '{': result->type = CPP_OPEN_BRACE; break;
    case '}': result->type = CPP_CLOSE_BRACE; break;
    case ';': result->type = CPP_SEMICOLON; break;

      /* @ is a punctuator in Objective-C.  */
    case '@': result->type = CPP_ATSIGN; break;

    case '$':
      if (CPP_OPTION (pfile, dollars_in_ident))
        goto start_ident;
      /* Fall through...  */

    random_char:
    default:
      result->type = CPP_OTHER;
      result->val.c = c;
      break;
    }

  return result;
}

/* An upper bound on the number of bytes needed to spell TOKEN,
   including preceding whitespace.  */
unsigned int
cpp_token_len (token)
     const cpp_token *token;
{
  unsigned int len;

  switch (TOKEN_SPELL (token))
    {
    default:		len = 0;			break;
    case SPELL_NUMBER:
    case SPELL_STRING:	len = token->val.str.len;	break;
    case SPELL_IDENT:	len = NODE_LEN (token->val.node); break;
    }
  /* 1 for whitespace, 4 for comment delimiters.  */
  return len + 5;
}

/* Write the spelling of a token TOKEN to BUFFER.  The buffer must
   already contain enough space to hold the token's spelling.
Returns a pointer to the character after the last character
   written.  */
unsigned char *
cpp_spell_token (pfile, token, buffer)
     cpp_reader *pfile;		/* Would be nice to be rid of this...  */
     const cpp_token *token;
     unsigned char *buffer;
{
  switch (TOKEN_SPELL (token))
    {
    case SPELL_OPERATOR:
      {
        const unsigned char *spelling;
        unsigned char c;

        if (token->flags & DIGRAPH)
          spelling
            = digraph_spellings[(int) token->type - (int) CPP_FIRST_DIGRAPH];
        else if (token->flags & NAMED_OP)
          /* A named operator (e.g. "and") spells as its identifier.  */
          goto spell_ident;
        else
          spelling = TOKEN_NAME (token);

        while ((c = *spelling++) != '\0')
          *buffer++ = c;
      }
      break;

    case SPELL_CHAR:
      *buffer++ = token->val.c;
      break;

    spell_ident:
    case SPELL_IDENT:
      memcpy (buffer, NODE_NAME (token->val.node),
              NODE_LEN (token->val.node));
      buffer += NODE_LEN (token->val.node);
      break;

    case SPELL_NUMBER:
      memcpy (buffer, token->val.str.text, token->val.str.len);
      buffer += token->val.str.len;
      break;

    case SPELL_STRING:
      {
        int left, right, tag;
        switch (token->type)
          {
          case CPP_STRING:	left = '"';  right = '"';  tag = '\0'; break;
          case CPP_WSTRING:	left = '"';  right = '"';  tag = 'L';  break;
          case CPP_CHAR:	left = '\''; right = '\''; tag = '\0'; break;
          case CPP_WCHAR:	left = '\''; right = '\''; tag = 'L';  break;
          case CPP_HEADER_NAME:	left = '<';  right = '>';  tag = '\0'; break;
          default:
            cpp_error (pfile, DL_ICE, "unknown string token %s\n",
                       TOKEN_NAME (token));
            return buffer;
          }
        if (tag) *buffer++ = tag;
        *buffer++ = left;
        memcpy (buffer, token->val.str.text, token->val.str.len);
        buffer += token->val.str.len;
        *buffer++ = right;
      }
      break;

    case SPELL_NONE:
      cpp_error (pfile, DL_ICE, "unspellable token %s",
                 TOKEN_NAME (token));
      break;
    }

  return buffer;
}

/* Returns TOKEN spelt as a null-terminated string.  The string is
   freed when the reader is destroyed.  Useful for diagnostics.
 */
unsigned char *
cpp_token_as_text (pfile, token)
     cpp_reader *pfile;
     const cpp_token *token;
{
  unsigned int len = cpp_token_len (token);
  unsigned char *start = _cpp_unaligned_alloc (pfile, len), *end;

  end = cpp_spell_token (pfile, token, start);
  end[0] = '\0';

  return start;
}

/* Used by C front ends, which really should move to using
   cpp_token_as_text.  */
const char *
cpp_type2name (type)
     enum cpp_ttype type;
{
  return (const char *) token_spellings[type].name;
}

/* Writes the spelling of token to FP, without any preceding space.
   Separated from cpp_spell_token for efficiency - to avoid stdio
   double-buffering.  */
void
cpp_output_token (token, fp)
     const cpp_token *token;
     FILE *fp;
{
  switch (TOKEN_SPELL (token))
    {
    case SPELL_OPERATOR:
      {
        const unsigned char *spelling;
        int c;

        if (token->flags & DIGRAPH)
          spelling
            = digraph_spellings[(int) token->type - (int) CPP_FIRST_DIGRAPH];
        else if (token->flags & NAMED_OP)
          /* A named operator (e.g. "and") spells as its identifier.  */
          goto spell_ident;
        else
          spelling = TOKEN_NAME (token);

        c = *spelling;
        do
          putc (c, fp);
        while ((c = *++spelling) != '\0');
      }
      break;

    case SPELL_CHAR:
      putc (token->val.c, fp);
      break;

    spell_ident:
    case SPELL_IDENT:
      fwrite (NODE_NAME (token->val.node), 1,
              NODE_LEN (token->val.node), fp);
      break;

    case SPELL_NUMBER:
      fwrite (token->val.str.text, 1, token->val.str.len, fp);
      break;

    case SPELL_STRING:
      {
        int left, right, tag;
        switch (token->type)
          {
          case CPP_STRING:	left = '"';  right = '"';  tag = '\0'; break;
          case CPP_WSTRING:	left = '"';  right = '"';  tag = 'L';  break;
          case CPP_CHAR:	left = '\''; right = '\''; tag = '\0'; break;
          case CPP_WCHAR:	left = '\''; right = '\''; tag = 'L';  break;
          case CPP_HEADER_NAME:	left = '<';  right = '>';  tag = '\0'; break;
          default:
            fprintf (stderr, "impossible STRING token %s\n",
                     TOKEN_NAME (token));
            return;
          }
        if (tag) putc (tag, fp);
        putc (left, fp);
        fwrite (token->val.str.text, 1, token->val.str.len, fp);
        putc (right, fp);
      }
      break;

    case SPELL_NONE:
      /* An error, most probably.  */
      break;
    }
}

/* Compare two tokens.
 */
int
_cpp_equiv_tokens (a, b)
     const cpp_token *a, *b;
{
  if (a->type == b->type && a->flags == b->flags)
    switch (TOKEN_SPELL (a))
      {
      default:			/* Keep compiler happy.  */
      case SPELL_OPERATOR:
        return 1;
      case SPELL_CHAR:
        return a->val.c == b->val.c; /* Character.  */
      case SPELL_NONE:
        return (a->type != CPP_MACRO_ARG
                || a->val.arg_no == b->val.arg_no);
      case SPELL_IDENT:
        return a->val.node == b->val.node;
      case SPELL_NUMBER:
      case SPELL_STRING:
        return (a->val.str.len == b->val.str.len
                && !memcmp (a->val.str.text, b->val.str.text,
                            a->val.str.len));
      }

  return 0;
}

/* Returns nonzero if a space should be inserted to avoid an
   accidental token paste for output.  For simplicity, it is
   conservative, and occasionally advises a space where one is not
   needed, e.g. "." and ".2".  */
int
cpp_avoid_paste (pfile, token1, token2)
     cpp_reader *pfile;
     const cpp_token *token1, *token2;
{
  enum cpp_ttype a = token1->type, b = token2->type;
  cppchar_t c;

  /* Named operators paste like identifiers.  */
  if (token1->flags & NAMED_OP)
    a = CPP_NAME;
  if (token2->flags & NAMED_OP)
    b = CPP_NAME;

  /* C is the first character of TOKEN2's spelling, or EOF if it has
     no single-character spelling.  */
  c = EOF;
  if (token2->flags & DIGRAPH)
    c = digraph_spellings[(int) b - (int) CPP_FIRST_DIGRAPH][0];
  else if (token_spellings[b].category == SPELL_OPERATOR)
    c = token_spellings[b].name[0];

  /* Quickly get everything that can paste with an '='.  */
  if ((int) a <= (int) CPP_LAST_EQ && c == '=')
    return 1;

  switch (a)
    {
    case CPP_GREATER:	return c == '>' || c == '?';
    case CPP_LESS:	return c == '<' || c == '?' || c == '%' || c == ':';
    case CPP_PLUS:	return c == '+';
    case CPP_MINUS:	return c == '-' || c == '>';
    case CPP_DIV:	return c == '/' || c == '*'; /* Comments.  */
    case CPP_MOD:	return c == ':' || c == '>';
    case CPP_AND:	return c == '&';
    case CPP_OR:	return c == '|';
    case CPP_COLON:	return c == ':' || c == '>';
    case CPP_DEREF:	return c == '*';
    case CPP_DOT:	return c == '.' || c == '%' || b == CPP_NUMBER;
    case CPP_HASH:	return c == '#' || c == '%'; /* Digraph form.  */
    case CPP_NAME:	return ((b == CPP_NUMBER
                                 && name_p (pfile, &token2->val.str))
                                || b == CPP_NAME
                                || b == CPP_CHAR || b == CPP_STRING); /* L */
    case CPP_NUMBER:	return (b == CPP_NUMBER || b == CPP_NAME
                                || c == '.' || c == '+' || c == '-');
    case CPP_OTHER:	return (CPP_OPTION (pfile, objc)
                                && token1->val.c == '@'
                                && (b == CPP_NAME || b == CPP_STRING));
    default:		break;
    }

  return 0;
}

/* Output all the remaining tokens on the current line, and a newline
   character, to FP.  Leading whitespace is removed.  If there are
   macros, special token padding is not performed.  */
void
cpp_output_line (pfile, fp)
     cpp_reader *pfile;
     FILE *fp;
{
  const cpp_token *token;

  token = cpp_get_token (pfile);
  while (token->type != CPP_EOF)
    {
      cpp_output_token (token, fp);
      token = cpp_get_token (pfile);
      if (token->flags & PREV_WHITE)
        putc (' ', fp);
    }

  putc ('\n', fp);
}

/* Returns the value of a hexadecimal digit.  Aborts on a non-hex
   digit; callers must have already validated with ISXDIGIT.  */
static unsigned int
hex_digit_value (c)
     unsigned int c;
{
  if (hex_p (c))
    return hex_value (c);
  else
    abort ();
}

/* Parse a '\uNNNN' or '\UNNNNNNNN' sequence.  Returns 1 to indicate
   failure if cpplib is not parsing C++ or C99.  Such failure is
   silent, and no variables are updated.  Otherwise returns 0, and
   warns if -Wtraditional.

   [lex.charset]: The character designated by the universal character
   name \UNNNNNNNN is that character whose character short name in
   ISO/IEC 10646 is NNNNNNNN; the character designated by the
   universal character name \uNNNN is that character whose character
   short name in ISO/IEC 10646 is 0000NNNN.  If the hexadecimal value
   for a universal character name is less than 0x20 or in the range
   0x7F-0x9F (inclusive), or if the universal character name
   designates a character in the basic source character set, then the
   program is ill-formed.

   We assume that wchar_t is Unicode, so we don't need to do any
   mapping.  Is this ever wrong?

   PC points to the 'u' or 'U', PSTR is points to the byte after PC,
   LIMIT is the end of the string or charconst.
PSTR is updated to point after the UCS on return, and the UCS is written into PC. */ static int maybe_read_ucs (pfile, pstr, limit, pc) cpp_reader *pfile; const unsigned char **pstr; const unsigned char *limit; cppchar_t *pc; { const unsigned char *p = *pstr; unsigned int code = 0; unsigned int c = *pc, length; /* Only attempt to interpret a UCS for C++ and C99. */ if (! (CPP_OPTION (pfile, cplusplus) || CPP_OPTION (pfile, c99))) return 1; if (CPP_WTRADITIONAL (pfile)) cpp_error (pfile, DL_WARNING, "the meaning of '\\%c' is different in traditional C", c); length = (c == 'u' ? 4: 8); if ((size_t) (limit - p) < length) { cpp_error (pfile, DL_ERROR, "incomplete universal-character-name"); /* Skip to the end to avoid more diagnostics. */ p = limit; } else { for (; length; length--, p++) { c = *p; if (ISXDIGIT (c)) code = (code << 4) + hex_digit_value (c); else { cpp_error (pfile, DL_ERROR, "non-hex digit '%c' in universal-character-name", c); /* We shouldn't skip in case there are multibyte chars. */ break; } } } #ifdef TARGET_EBCDIC cpp_error (pfile, DL_ERROR, "universal-character-name on EBCDIC target"); code = 0x3f; /* EBCDIC invalid character */ #else /* True extended characters are OK. */ if (code >= 0xa0 && !(code & 0x80000000) && !(code >= 0xD800 && code <= 0xDFFF)) ; /* The standard permits $, @ and ` to be specified as UCNs. We use hex escapes so that this also works with EBCDIC hosts. */ else if (code == 0x24 || code == 0x40 || code == 0x60) ; /* Don't give another error if one occurred above. */ else if (length == 0) cpp_error (pfile, DL_ERROR, "universal-character-name out of range"); #endif *pstr = p; *pc = code; return 0; } /* Returns the value of an escape sequence, truncated to the correct target precision. PSTR points to the input pointer, which is just after the backslash. LIMIT is how much text we have. WIDE is true if the escape sequence is part of a wide character constant or string literal. Handles all relevant diagnostics. 
*/ cppchar_t cpp_parse_escape (pfile, pstr, limit, wide) cpp_reader *pfile; const unsigned char **pstr; const unsigned char *limit; int wide; { int unknown = 0; const unsigned char *str = *pstr; cppchar_t c, mask; unsigned int width; if (wide) width = CPP_OPTION (pfile, wchar_precision); else width = CPP_OPTION (pfile, char_precision); if (width < BITS_PER_CPPCHAR_T) mask = ((cppchar_t) 1 << width) - 1; else mask = ~0; c = *str++; switch (c) { case '\\': case '\'': case '"': case '?': break; case 'b': c = TARGET_BS; break; case 'f': c = TARGET_FF; break; case 'n': c = TARGET_NEWLINE; break; case 'r': c = TARGET_CR; break; case 't': c = TARGET_TAB; break; case 'v': c = TARGET_VT; break; case '(': case '{': case '[': case '%': /* '\(', etc, are used at beginning of line to avoid confusing Emacs. '\%' is used to prevent SCCS from getting confused. */ unknown = CPP_PEDANTIC (pfile); break; case 'a': if (CPP_WTRADITIONAL (pfile)) cpp_error (pfile, DL_WARNING, "the meaning of '\\a' is different in traditional C"); c = TARGET_BELL; break; case 'e': case 'E': if (CPP_PEDANTIC (pfile)) cpp_error (pfile, DL_PEDWARN, "non-ISO-standard escape sequence, '\\%c'", (int) c); c = TARGET_ESC; break; case 'u': case 'U': unknown = maybe_read_ucs (pfile, &str, limit, &c); break; case 'x': if (CPP_WTRADITIONAL (pfile)) cpp_error (pfile, DL_WARNING, "the meaning of '\\x' is different in traditional C"); { cppchar_t i = 0, overflow = 0; int digits_found = 0; while (str < limit) { c = *str; if (! 
ISXDIGIT (c)) break; str++; overflow |= i ^ (i << 4 >> 4); i = (i << 4) + hex_digit_value (c); digits_found = 1; } if (!digits_found) cpp_error (pfile, DL_ERROR, "\\x used with no following hex digits"); if (overflow | (i != (i & mask))) { cpp_error (pfile, DL_PEDWARN, "hex escape sequence out of range"); i &= mask; } c = i; } break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': { size_t count = 0; cppchar_t i = c - '0'; while (str < limit && ++count < 3) { c = *str; if (c < '0' || c > '7') break; str++; i = (i << 3) + c - '0'; } if (i != (i & mask)) { cpp_error (pfile, DL_PEDWARN, "octal escape sequence out of range"); i &= mask; } c = i; } break; default: unknown = 1; break; } if (unknown) { if (ISGRAPH (c)) cpp_error (pfile, DL_PEDWARN, "unknown escape sequence '\\%c'", (int) c); else cpp_error (pfile, DL_PEDWARN, "unknown escape sequence: '\\%03o'", (int) c); } if (c > mask) { cpp_error (pfile, DL_PEDWARN, "escape sequence out of range for its type"); c &= mask; } *pstr = str; return c; } /* Interpret a (possibly wide) character constant in TOKEN. WARN_MULTI warns about multi-character charconsts. PCHARS_SEEN points to a variable that is filled in with the number of characters seen, and UNSIGNEDP to a variable that indicates whether the result has signed type. */ cppchar_t cpp_interpret_charconst (pfile, token, pchars_seen, unsignedp) cpp_reader *pfile; const cpp_token *token; unsigned int *pchars_seen; int *unsignedp; { const unsigned char *str = token->val.str.text; const unsigned char *limit = str + token->val.str.len; unsigned int chars_seen = 0; size_t width, max_chars; cppchar_t c, mask, result = 0; bool unsigned_p; #ifdef MULTIBYTE_CHARS (void) local_mbtowc (NULL, NULL, 0); #endif /* Width in bits. 
*/ if (token->type == CPP_CHAR) { width = CPP_OPTION (pfile, char_precision); max_chars = CPP_OPTION (pfile, int_precision) / width; unsigned_p = CPP_OPTION (pfile, unsigned_char); } else { width = CPP_OPTION (pfile, wchar_precision); max_chars = 1; unsigned_p = CPP_OPTION (pfile, unsigned_wchar); } if (width < BITS_PER_CPPCHAR_T) mask = ((cppchar_t) 1 << width) - 1; else mask = ~0; while (str < limit) { #ifdef MULTIBYTE_CHARS wchar_t wc; int char_len; char_len = local_mbtowc (&wc, str, limit - str); if (char_len == -1) { cpp_error (pfile, DL_WARNING, "ignoring invalid multibyte character"); c = *str++; } else { str += char_len; c = wc; } #else c = *str++; #endif if (c == '\\') c = cpp_parse_escape (pfile, &str, limit, token->type == CPP_WCHAR); #ifdef MAP_CHARACTER if (ISPRINT (c)) c = MAP_CHARACTER (c); #endif chars_seen++; /* Truncate the character, scale the result and merge the two. */ c &= mask; if (width < BITS_PER_CPPCHAR_T) result = (result << width) | c; else result = c; } if (chars_seen == 0) cpp_error (pfile, DL_ERROR, "empty character constant"); else if (chars_seen > 1) { /* Multichar charconsts are of type int and therefore signed. */ unsigned_p = 0; if (chars_seen > max_chars) { chars_seen = max_chars; cpp_error (pfile, DL_WARNING, "character constant too long for its type"); } else if (CPP_OPTION (pfile, warn_multichar)) cpp_error (pfile, DL_WARNING, "multi-character character constant"); } /* Sign-extend or truncate the constant to cppchar_t. The value is in WIDTH bits, but for multi-char charconsts it's value is the full target type's width. */ if (chars_seen > 1) width *= max_chars; if (width < BITS_PER_CPPCHAR_T) { mask = ((cppchar_t) 1 << width) - 1; if (unsigned_p || !(result & (1 << (width - 1)))) result &= mask; else result |= ~mask; } *pchars_seen = chars_seen; *unsignedp = unsigned_p; return result; } /* Memory buffers. Changing these three constants can have a dramatic effect on performance. 
The values here are reasonable defaults, but might be tuned. If you adjust them, be sure to test across a range of uses of cpplib, including heavy nested function-like macro expansion. Also check the change in peak memory usage (NJAMD is a good tool for this). */ #define MIN_BUFF_SIZE 8000 #define BUFF_SIZE_UPPER_BOUND(MIN_SIZE) (MIN_BUFF_SIZE + (MIN_SIZE) * 3 / 2) #define EXTENDED_BUFF_SIZE(BUFF, MIN_EXTRA) \ (MIN_EXTRA + ((BUFF)->limit - (BUFF)->cur) * 2) #if MIN_BUFF_SIZE > BUFF_SIZE_UPPER_BOUND (0) #error BUFF_SIZE_UPPER_BOUND must be at least as large as MIN_BUFF_SIZE! #endif /* Create a new allocation buffer. Place the control block at the end of the buffer, so that buffer overflows will cause immediate chaos. */ static _cpp_buff * new_buff (len) size_t len; { _cpp_buff *result; unsigned char *base; if (len < MIN_BUFF_SIZE) len = MIN_BUFF_SIZE; len = CPP_ALIGN (len); base = xmalloc (len + sizeof (_cpp_buff)); result = (_cpp_buff *) (base + len); result->base = base; result->cur = base; result->limit = base + len; result->next = NULL; return result; } /* Place a chain of unwanted allocation buffers on the free list. */ void _cpp_release_buff (pfile, buff) cpp_reader *pfile; _cpp_buff *buff; { _cpp_buff *end = buff; while (end->next) end = end->next; end->next = pfile->free_buffs; pfile->free_buffs = buff; } /* Return a free buffer of size at least MIN_SIZE. */ _cpp_buff * _cpp_get_buff (pfile, min_size) cpp_reader *pfile; size_t min_size; { _cpp_buff *result, **p; for (p = &pfile->free_buffs;; p = &(*p)->next) { size_t size; if (*p == NULL) return new_buff (min_size); result = *p; size = result->limit - result->base; /* Return a buffer that's big enough, but don't waste one that's way too big. 
*/ if (size >= min_size && size <= BUFF_SIZE_UPPER_BOUND (min_size)) break; } *p = result->next; result->next = NULL; result->cur = result->base; return result; } /* Creates a new buffer with enough space to hold the uncommitted remaining bytes of BUFF, and at least MIN_EXTRA more bytes. Copies the excess bytes to the new buffer. Chains the new buffer after BUFF, and returns the new buffer. */ _cpp_buff * _cpp_append_extend_buff (pfile, buff, min_extra) cpp_reader *pfile; _cpp_buff *buff; size_t min_extra; { size_t size = EXTENDED_BUFF_SIZE (buff, min_extra); _cpp_buff *new_buff = _cpp_get_buff (pfile, size); buff->next = new_buff; memcpy (new_buff->base, buff->cur, BUFF_ROOM (buff)); return new_buff; } /* Creates a new buffer with enough space to hold the uncommitted remaining bytes of the buffer pointed to by BUFF, and at least MIN_EXTRA more bytes. Copies the excess bytes to the new buffer. Chains the new buffer before the buffer pointed to by BUFF, and updates the pointer to point to the new buffer. */ void _cpp_extend_buff (pfile, pbuff, min_extra) cpp_reader *pfile; _cpp_buff **pbuff; size_t min_extra; { _cpp_buff *new_buff, *old_buff = *pbuff; size_t size = EXTENDED_BUFF_SIZE (old_buff, min_extra); new_buff = _cpp_get_buff (pfile, size); memcpy (new_buff->base, old_buff->cur, BUFF_ROOM (old_buff)); new_buff->next = old_buff; *pbuff = new_buff; } /* Free a chain of buffers starting at BUFF. */ void _cpp_free_buff (buff) _cpp_buff *buff; { _cpp_buff *next; for (; buff; buff = next) { next = buff->next; free (buff->base); } } /* Allocate permanent, unaligned storage of length LEN. 
*/ unsigned char * _cpp_unaligned_alloc (pfile, len) cpp_reader *pfile; size_t len; { _cpp_buff *buff = pfile->u_buff; unsigned char *result = buff->cur; if (len > (size_t) (buff->limit - result)) { buff = _cpp_get_buff (pfile, len); buff->next = pfile->u_buff; pfile->u_buff = buff; result = buff->cur; } buff->cur = result + len; return result; } /* Allocate permanent, unaligned storage of length LEN from a_buff. That buffer is used for growing allocations when saving macro replacement lists in a #define, and when parsing an answer to an assertion in #assert, #unassert or #if (and therefore possibly whilst expanding macros). It therefore must not be used by any code that they might call: specifically the lexer and the guts of the macro expander. All existing other uses clearly fit this restriction: storing registered pragmas during initialization. */ unsigned char * _cpp_aligned_alloc (pfile, len) cpp_reader *pfile; size_t len; { _cpp_buff *buff = pfile->a_buff; unsigned char *result = buff->cur; if (len > (size_t) (buff->limit - result)) { buff = _cpp_get_buff (pfile, len); buff->next = pfile->a_buff; pfile->a_buff = buff; result = buff->cur; } buff->cur = result + len; return result; }
/* ===== usolve_dft_inc.c ===== */
/** * @file usolve_dft_inc.c * @brief u-subproblem DFT solver for TV-regularized deconvolution * @author Pascal Getreuer <getreuer@gmail.com> * * Copyright (c) 2010-2012, Pascal Getreuer * All rights reserved. * * This program is free software: you can use, modify and/or * redistribute it under the terms of the simplified BSD License. You * should have received a copy of this license along this program. If * not, see <http://www.opensource.org/licenses/bsd-license.html>. */ #include <string.h> #include "util_deconv.h" /** * @brief Symmetrically pad an image to twice its size * @param Dest the destination * @param Src the source image * @param Width, Height, NumChannels the dimensions of Src * * The Src image of size Width by Height is reflected over each axis to * create an image that is 2*Width by 2*Height. */ static void SymmetricPadding(num *Dest, const num *Src, int Width, int Height, int NumChannels) { const int InPlace = (Dest == Src); const int PadWidth = 2*Width; const long ChannelJump = ((long)PadWidth) * ((long)Height); const int SrcStride = (InPlace) ? PadWidth : Width; int x, y, k; for(k = 0; k < NumChannels; k++) { for(y = 0; y < Height; y++, Dest += PadWidth, Src += SrcStride) { if(!InPlace) memcpy(Dest, Src, sizeof(num) * Width); for(x = 0; x < Width; x++) Dest[Width + x] = Dest[Width - 1 - x]; memcpy(Dest + ((long)(2*(Height - y) - 1)) * PadWidth, Dest, sizeof(num) * PadWidth); } Dest += ChannelJump; if(InPlace) Src = Dest; } } /** @brief Compute ATrans = Alpha . conj(KernelTrans) . 
DFT[ztilde] */ static void AdjBlurFourier(numcomplex *ATrans, num *A, FFT(plan) TransformA, const numcomplex *KernelTrans, const num *ztilde, int Width, int Height, int NumChannels, num Alpha) { const int PadWidth = 2*Width; const int PadHeight = 2*Height; const int TransWidth = PadWidth/2 + 1; const long TransNumPixels = ((long)TransWidth) * ((long)PadHeight); long i; int k; /* Compute A as a symmetric padded version of ztilde */ SymmetricPadding(A, ztilde, Width, Height, NumChannels); /* Compute ATrans = DFT[A] */ FFT(execute)(TransformA); /* Compute ATrans = Alpha . conj(KernelTrans) . ATrans */ for(k = 0; k < NumChannels; k++, ATrans += TransNumPixels) for(i = 0; i < TransNumPixels; i++) { num Temp = Alpha*(KernelTrans[i][0] * ATrans[i][1] - KernelTrans[i][1] * ATrans[i][0]); ATrans[i][0] = Alpha*(KernelTrans[i][0] * ATrans[i][0] + KernelTrans[i][1] * ATrans[i][1]); ATrans[i][1] = Temp; } } /** * @brief Intializations to prepare TvRestore for Fourier deconvolution * @param S tvreg solver state * @return 1 on success, 0 on failure */ static int InitDeconvFourier(tvregsolver *S) { num *B = S->B; numcomplex *ATrans = (numcomplex *)S->ATrans; numcomplex *BTrans = (numcomplex *)S->BTrans; numcomplex *KernelTrans = (numcomplex *)S->KernelTrans; num *DenomTrans = S->DenomTrans; const num *Kernel = S->Opt.Kernel; const int KernelWidth = S->Opt.KernelWidth; const int KernelHeight = S->Opt.KernelHeight; const int PadWidth = S->PadWidth; const int PadHeight = S->PadHeight; const num Alpha = S->Alpha; const long PadNumPixels = ((long)PadWidth) * ((long)PadHeight); const int TransWidth = PadWidth/2 + 1; FFT(plan) Plan = NULL; long i; int PadSize[2], x0, y0, x, y, xi, yi; int exit; for(i = 0; i < PadNumPixels; i++) B[i] = 0; x0 = -KernelWidth/2; y0 = -KernelHeight/2; /* Pad Kernel to size PadWidth by PadHeight. If Kernel happens to be larger, it is wrapped. 
*/ for(y = y0, i = 0; y < y0 + KernelHeight; y++) { yi = PeriodicExtension(PadHeight, y); for(x = x0; x < x0 + KernelWidth; x++, i++) { xi = PeriodicExtension(PadWidth, x); B[xi + PadWidth*yi] += Kernel[i]; } } /* Compute the Fourier transform of the padded Kernel */ #ifdef _OPENMP #pragma omp critical (fftw) #endif exit = !(Plan = FFT(plan_dft_r2c_2d)(PadHeight, PadWidth, B, KernelTrans, FFTW_ESTIMATE | FFTW_DESTROY_INPUT)); if (exit) return 0; FFT(execute)(Plan); #ifdef _OPENMP #pragma omp critical (fftw) #endif FFT(destroy_plan)(Plan); /* Precompute the denominator that will be used in the u-subproblem. */ for(y = 0, i = 0; y < PadHeight; y++) for(x = 0; x < TransWidth; x++, i++) DenomTrans[i] = (num)(PadNumPixels*(Alpha*(KernelTrans[i][0]*KernelTrans[i][0] + KernelTrans[i][1]*KernelTrans[i][1]) + 2*(2 - cos(x*M_2PI/PadWidth) - cos(y*M_2PI/PadHeight)))); /* Plan Fourier transforms */ PadSize[1] = PadWidth; PadSize[0] = PadHeight; #ifdef _OPENMP #pragma omp critical (fftw) #endif exit = !(S->TransformA = FFT(plan_many_dft_r2c)(2, PadSize, S->NumChannels, S->A, NULL, 1, PadNumPixels, ATrans, NULL, 1, TransWidth*PadHeight, FFTW_ESTIMATE | FFTW_DESTROY_INPUT)) || !(S->InvTransformA = FFT(plan_many_dft_c2r)(2, PadSize, S->NumChannels, ATrans, NULL, 1, TransWidth*PadHeight, S->A, NULL, 1, PadNumPixels, FFTW_ESTIMATE | FFTW_DESTROY_INPUT)) || !(S->TransformB = FFT(plan_many_dft_r2c)(2, PadSize, S->NumChannels, S->B, NULL, 1, PadNumPixels, BTrans, NULL, 1, TransWidth*PadHeight, FFTW_ESTIMATE | FFTW_DESTROY_INPUT)) || !(S->InvTransformB = FFT(plan_many_dft_c2r)(2, PadSize, S->NumChannels, BTrans, NULL, 1, TransWidth*PadHeight, S->B, NULL, 1, PadNumPixels, FFTW_ESTIMATE | FFTW_DESTROY_INPUT)); if (exit) return 0; /* Compute ATrans = Alpha . conj(KernelTrans) . 
DFT[f] */ if(!S->UseZ) AdjBlurFourier(ATrans, S->A, S->TransformA, (const numcomplex *)KernelTrans, S->f, S->Width, S->Height, S->NumChannels, Alpha); S->Ku = S->A; return 1; } /** * @brief Compute BTrans = ( ATrans - DFT[div(dtilde)] ) / DenomTrans * * This subroutine is a part of the DFT u-subproblem solution that is common * to both the d,u splitting (UseZ=0) and d,u,z splitting (UseZ=1). */ static void UTransSolveFourier(numcomplex *BTrans, num *B, FFT(plan) TransformB, numcomplex *ATrans, const numvec2 *dtilde, const num *DenomTrans, int Width, int Height, int NumChannels) { const long PadWidth = 2*Width; const long PadHeight = 2*Height; const long TransWidth = PadWidth/2 + 1; const long TransNumPixels = TransWidth * PadHeight; long i; int k; /* Compute B = div(dtilde) and pad with even half-sample symmetry */ Divergence(B, PadWidth, PadHeight, dtilde, Width, Height, NumChannels); SymmetricPadding(B, B, Width, Height, NumChannels); /* Compute BTrans = DFT[B] */ FFT(execute)(TransformB); /* Compute BTrans = ( ATrans - BTrans ) / DenomTrans */ for(k = 0; k < NumChannels; k++, ATrans += TransNumPixels, BTrans += TransNumPixels) for(i = 0; i < TransNumPixels; i++) { BTrans[i][0] = (ATrans[i][0] - BTrans[i][0]) / DenomTrans[i]; BTrans[i][1] = (ATrans[i][1] - BTrans[i][1]) / DenomTrans[i]; } } /** * @brief Solve the u-subproblem using DFT transforms (UseZ = 0) * * This routine solves the u-subproblem * \f[ \tfrac{\lambda}{\gamma}K^* Ku -\Delta u = \tfrac{\lambda}{ * \gamma}K^* f -\operatorname{div}\tilde{d}, \f] * where K denotes the blur operator \f$ Ku := \varphi * u \f$. 
The solution * is obtained using the discrete Fourier transform (DFT) as * \f[ u=\mathcal{F}^{-1}\left[\frac{\frac{\lambda}{\gamma}\overline{ * \mathcal{F}(\varphi)}\cdot\mathcal{F}(Ef)- \mathcal{F}\bigl(E * \operatorname{div}(d-b)\bigr)}{\frac{\lambda}{\gamma}\lvert\mathcal{F}( * \varphi)\rvert^2 - \mathcal{F}(\Delta)}\right], \f] * where E denotes symmetric extension and \f$ \mathcal{F} \f$ denotes the * DFT. */ static num UDeconvFourier(tvregsolver *S) { /* BTrans = ( ATrans - DFT[div(dtilde)] ) / DenomTrans */ UTransSolveFourier((numcomplex *)S->BTrans, S->B, S->TransformB, (numcomplex *)S->ATrans, S->dtilde, S->DenomTrans, S->Width, S->Height, S->NumChannels); /* B = IDFT[BTrans] */ FFT(execute)(S->InvTransformB); /* Trim padding, compute ||B - u||, and assign u = B */ return UUpdate(S); } #if defined(TVREG_USEZ) || defined(DOXYGEN) /** * @brief Solve the u-subproblem using DFT transforms (UseZ = 1) * * This extended version of UDeconvFourier is used when performing Fourier- * based deconvolution with the three-auxiliary variable algorithm (UseZ = 1), * that is, in a deconvolution problem with a non-symmetric kernel and non- * Gaussian noise model. */ static num UDeconvFourierZ(tvregsolver *S) { numcomplex *ATrans = (numcomplex *)S->ATrans; numcomplex *BTrans = (numcomplex *)S->BTrans; const numcomplex *KernelTrans = (const numcomplex *)S->KernelTrans; const int TransWidth = S->PadWidth/2 + 1; const long TransNumPixels = ((long)TransWidth) * ((long)S->PadHeight); long i; int k; /* Compute ATrans = Alpha . conj(KernelTrans) . DFT[ztilde] */ AdjBlurFourier(ATrans, S->A, S->TransformA, KernelTrans, S->ztilde, S->Width, S->Height, S->NumChannels, S->Alpha); /* BTrans = ( ATrans - DFT[div(dtilde)] ) / DenomTrans */ UTransSolveFourier((numcomplex *)S->BTrans, S->B, S->TransformB, ATrans, S->dtilde, S->DenomTrans, S->Width, S->Height, S->NumChannels); /* Compute ATrans = KernelTrans . 
BTrans */ for(k = 0; k < S->NumChannels; k++, ATrans += TransNumPixels, BTrans += TransNumPixels) for(i = 0; i < TransNumPixels; i++) { ATrans[i][0] = KernelTrans[i][0] * BTrans[i][0] - KernelTrans[i][1] * BTrans[i][1]; ATrans[i][1] = KernelTrans[i][0] * BTrans[i][1] + KernelTrans[i][1] * BTrans[i][0]; } /* A = IDFT[ATrans] = new Ku */ FFT(execute)(S->InvTransformA); /* B = IDFT[BTrans] = new u */ FFT(execute)(S->InvTransformB); /* Trim padding, compute ||B - u||, and assign u = B */ return UUpdate(S); } #endif
/* ===== altsim.h ===== */
#ifndef ALTSIM_H
#define ALTSIM_H

#include <stdio.h>
#include <omp.h>

/* FIX: these objects live in a header.  Without `static`, every
   translation unit that includes this header defines its own external
   `num_loops` etc. (multiple-definition link errors), and a plain
   `inline` function with no external definition is ill-formed in C99.
   Everything here is therefore given internal linkage.  */
static const int num_loops = 3;            /* substeps per C_step call      */
static const int accuracy_mode = 1;        /* 1 = BFECC advection, else plain */
static const int pressure_solve_steps = 20;/* Jacobi iterations (keep even) */

/* Bilinear interpolation of a field stored column-major (stride Ny).
   v points at the (xi, yi) corner; s, t are the fractional offsets. */
static inline double advect_sample(const double *v, int Ny, double s, double t)
{
    return (1 - s) * ((1 - t) * v[0]  + t * v[1])
         +      s  * ((1 - t) * v[Ny] + t * v[Ny + 1]);
}

/* Semi-Lagrangian advection of the velocity field v0 into v over time
   step dt.  Also records, per cell, the integer backtrace corner
   (advect_indexes) and interpolation weights (advect_lerps) so scalar
   fields can be advected along the same paths by C_apply_advection.
   Boundary (solid) cells keep their previous velocity.  */
static void C_advect_velocity(
    double * v,
    const double * const v0,
    const unsigned char * bound,
    int * advect_indexes,
    double * advect_lerps,
    const int Nx,
    const int Ny,
    const double dx,
    const double dy,
    const double dt)
{
    int x, y, idx, xi, yi, iidx;
    int vyidx = Nx * Ny;    /* offset of the y-velocity plane */
    /* FIX: these were declared float, silently truncating the double
       backtrace coordinates and the weights stored into the double
       advect_lerps array.  */
    double xa, ya, s, t;

#pragma omp for schedule(auto)
    for(x = 0; x < Nx; ++x)
    {
        for(y = 0; y < Ny; ++y)
        {
            idx = y + x * Ny;

            /* Backtrace the particle that lands in this cell. */
            xa = (double)x - dt * v0[0 + idx] / dx;
            ya = (double)y - dt * v0[vyidx + idx] / dy;

            /* Clamp inside the grid so the 2x2 stencil stays in bounds. */
            xa = xa < 0.0 ? 0.0 : (xa >= Nx - 1.01) ? (Nx - 1.01) : xa;
            ya = ya < 0.0 ? 0.0 : (ya >= Ny - 1.01) ? (Ny - 1.01) : ya;

            xi = (int)xa;
            yi = (int)ya;
            s = xa - (double)xi;
            t = ya - (double)yi;

            advect_indexes[0 + idx] = xi;
            advect_indexes[vyidx + idx] = yi;
            advect_lerps[0 + idx] = s;
            advect_lerps[vyidx + idx] = t;

            if(!bound[idx])
            {
                iidx = yi + xi * Ny;
                v[0 + idx] = advect_sample(v0 + iidx, Ny, s, t);
                v[vyidx + idx] = advect_sample(v0 + vyidx + iidx, Ny, s, t);
            }
            else
            {
                v[0 + idx] = v0[0 + idx];
                v[vyidx + idx] = v0[vyidx + idx];
            }
        }
    }
}

/* Advect the scalar field d0 into d along the backtraces recorded by
   the last C_advect_velocity call.  Solid cells are damped by 0.9.  */
static void C_apply_advection(
    double * d,
    const double * const d0,
    const unsigned char * bound,
    int * advect_indexes,
    double * advect_lerps,
    const int Nx,
    const int Ny)
{
    int vyidx = Nx * Ny;
    int x, y, idx, iidx;

#pragma omp for schedule(auto)
    for(x = 0; x < Nx; ++x)
    {
        for(y = 0; y < Ny; ++y)
        {
            idx = y + x * Ny;
            if(!bound[idx])
            {
                iidx = advect_indexes[vyidx + idx]
                     + advect_indexes[idx] * Ny;
                d[0 + idx] = advect_sample(d0 + iidx, Ny,
                    advect_lerps[idx], advect_lerps[vyidx + idx]);
            }
            else
            {
                d[0 + idx] *= 0.9;
            }
        }
    }
}

/* Jacobi iteration for the pressure Poisson equation.  Ping-pongs
   between pressure and pressure_buffer; pressure_solve_steps must be
   even so the result ends up back in the caller's pressure array.  */
static void C_pressure_solve(
    double * pressure,
    double * pressure_buffer,
    double const * const div,
    unsigned char const * const bound,
    int const Nx,
    int const Ny,
    double const dx,
    double const dy)
{
    int x, y, k, idx;
    double * temp = 0;

    /* shouldn't have any pressure inside solids */
#pragma omp for schedule(auto)
    for(x = 0; x < Nx * Ny; ++x)
    {
        pressure[x] *= bound[x] ? 0.9 : 1.0;
    }

    /* make sure this is a multiple of 2 steps */
    for(k = 0; k < pressure_solve_steps; ++k)
    {
        /* Zero the domain border of the write buffer. */
#pragma omp for schedule(auto)
        for(x = 0; x < Nx; ++x)
        {
            pressure_buffer[x * Ny] = 0;
            pressure_buffer[(x + 1) * Ny - 1] = 0;
        }
#pragma omp for schedule(auto)
        for(y = 0; y < Ny; ++y)
        {
            pressure_buffer[y] = 0;
            pressure_buffer[y + Ny * (Nx - 1)] = 0;
        }

#pragma omp for schedule(auto)
        for(x = 1; x < Nx - 1; ++x)
        {
            for(y = 1; y < Ny - 1; ++y)
            {
                idx = y + x * Ny;
                /* Neumann-like condition: solid neighbors mirror the
                   center cell's pressure. */
                pressure_buffer[idx] = (1.0 / 4) * (
                      (bound[idx - 1]  ? pressure[idx] : pressure[idx - 1])
                    + (bound[idx + 1]  ? pressure[idx] : pressure[idx + 1])
                    + (bound[idx - Ny] ? pressure[idx] : pressure[idx - Ny])
                    + (bound[idx + Ny] ? pressure[idx] : pressure[idx + Ny])
                    - dx * dy * div[idx]);
            }
        }

        /* Swap the (thread-local) pointers for the next sweep. */
        {
            temp = pressure_buffer;
            pressure_buffer = pressure;
            pressure = temp;
        }
#pragma omp barrier
    }
}

/* Central-difference divergence of the velocity field v into div.
   Contributions across solid cells are dropped.  */
static void C_divergence(
    double * div,
    const double * const v,
    const unsigned char * bound,
    int const Nx,
    int const Ny,
    const double dx,
    const double dy)
{
    int x, y, idx;
    int vyidx = Nx * Ny;

#pragma omp for schedule(auto)
    for(x = 1; x < Nx - 1; ++x)
    {
        for(y = 1; y < Ny - 1; ++y)
        {
            idx = y + x * Ny;
            div[idx] = (bound[idx + Ny] ? 0.0 : v[idx + Ny] / (2 * dx))
                     - (bound[idx - Ny] ? 0.0 : v[idx - Ny] / (2 * dx))
                     + (bound[idx + 1] ? 0.0 : v[vyidx + idx + 1] / (2 * dy))
                     - (bound[idx - 1] ? 0.0 : v[vyidx + idx - 1] / (2 * dy));
        }
    }
}

/* Make v divergence-free: v = v0 - grad(p), except in solid cells
   which copy v0 unchanged.  */
static void C_sub_gradient(
    double * v,
    const double * const v0,
    const double * const p,
    const unsigned char * bound,
    const int Nx,
    const int Ny,
    const double dx,
    const double dy)
{
    int x, y, idx;
    int vyidx = Nx * Ny;

#pragma omp for schedule(auto)
    for(x = 1; x < Nx - 1; ++x)
    {
        for(y = 1; y < Ny - 1; ++y)
        {
            idx = y + x * Ny;
            v[idx] = bound[idx] ? v0[idx]
                : v0[idx] - 1 / (2 * dx) * (p[idx + Ny] - p[idx - Ny]);
            v[vyidx + idx] = bound[idx] ? v0[vyidx + idx]
                : v0[vyidx + idx] - 1 / (2 * dy) * (p[idx + 1] - p[idx - 1]);
        }
    }
}

/* Copy tangential velocity from fluid neighbors into cells that touch
   a solid boundary, approximating a free-slip condition.  */
static void C_enforce_slip(
    double * v,
    const unsigned char * bound,
    const int Nx,
    const int Ny)
{
    int x, y, idx;
    int vyidx = Nx * Ny;

#pragma omp for schedule(auto)
    for(x = 1; x < Nx - 1; ++x)
    {
        for(y = 1; y < Ny - 1; ++y)
        {
            idx = y + x * Ny;
            /* take x velocity from vertical boundaries */
            v[idx] = bound[idx] ? v[idx]
                : bound[idx + Ny] ? v[idx + Ny]
                : bound[idx - Ny] ? v[idx - Ny]
                : v[idx];
            /* take y velocity from horizontal boundaries */
            v[vyidx + idx] = bound[idx] ? v[vyidx + idx]
                : bound[idx + 1] ? v[vyidx + idx + 1]
                : bound[idx - 1] ? v[vyidx + idx - 1]
                : v[vyidx + idx];
        }
    }
}

/* One full simulation step: num_loops substeps of (optionally BFECC)
   advection, pressure projection, slip enforcement, and advection of
   each attached density field.  Runs its own omp parallel region; the
   worker routines above use orphaned `omp for` so they share it.  */
static void C_step(
    double * v,
    double * vtmp,
    double * vtmp2,
    double * p,
    double * div,
    double * density_arrays,
    const int num_density_arrays,
    const unsigned char * bound,
    int * advect_indexes,
    double * advect_lerps,
    const int Nx,
    const int Ny,
    const double dx,
    const double dy,
    const double dt0
    )
{
#pragma omp parallel
    {
        int i, j, x, idx;
        double dt = dt0 / num_loops;

        for(i = 0; i < num_loops; ++i)
        {
#pragma omp barrier
            if(accuracy_mode == 1)
            {
                /* BFECC: advect forward, back, then advect the
                   error-compensated field. */
                C_advect_velocity(vtmp2, v, bound,
                    advect_indexes, advect_lerps, Nx, Ny, dx, dy, dt);
                C_advect_velocity(vtmp, vtmp2, bound,
                    advect_indexes, advect_lerps, Nx, Ny, dx, dy, -dt);
#pragma omp for schedule(auto)
                for(x = 0; x < Nx * Ny * 2; ++x)
                {
                    vtmp2[x] = 1.5 * v[x] - 0.5 * vtmp[x];
                }
                /* Corrected advection */
                C_advect_velocity(vtmp, vtmp2, bound,
                    advect_indexes, advect_lerps, Nx, Ny, dx, dy, dt);
            }
            else
            {
                /* Standard advection */
                C_advect_velocity(vtmp, v, bound,
                    advect_indexes, advect_lerps, Nx, Ny, dx, dy, dt);
            }

            /* remove divergence */
            C_divergence(div, vtmp, bound, Nx, Ny, dx, dy);
            C_pressure_solve(p, vtmp2, div, bound, Nx, Ny, dx, dy);
            C_sub_gradient(v, vtmp, p, bound, Nx, Ny, dx, dy);

            /* enforce slip at boundary */
            C_enforce_slip(v, bound, Nx, Ny);

            for(j = 0; j < num_density_arrays; ++j)
            {
                idx = Nx * Ny * j;
#pragma omp for schedule(auto)
                for(x = 0; x < Nx * Ny; ++x)
                {
                    vtmp[x] = density_arrays[idx + x];
                }
                C_apply_advection(density_arrays + idx, vtmp, bound,
                    advect_indexes, advect_lerps, Nx, Ny);
            }
        }
    }
}

#endif
/* ===== taskwait_omp.c ===== */
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * A bunch of n tasks (1st arg) are created by a single thread.
 * Each task creates two tasks more and executes a taskwait directive
 */
#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_TASKS 50000
#define NUM_REPS 1

/* Global counters updated concurrently by tasks; both are expected to
   reach ntasks by the end of the run (see the final printfs). */
int o = 0;   /* number of na() child tasks executed */
int pp = 0;  /* number of presscal() task groups completed */

/* Child task body: count one execution.
   FIX: o++ from many concurrent tasks was a data race; make it atomic. */
void na(float value)
{
    (void)value;
#pragma omp atomic
    o++;
}

/* Scale *a by value.  Each task owns a distinct element, so no
   synchronization is needed here. */
void sscal(float value, float *a)
{
    *a = *a * value;
}

/* Spawn two child tasks (scale + count) and wait for both. */
void presscal(float value, float *a)
{
#pragma omp task
    {
        sscal(value, a);
    }
#pragma omp task
    {
        na(value);
    }
#pragma omp taskwait
    /* FIX: pp was never incremented, although the program reports
       "pp=%d and it should be %d" expecting ntasks.  Count one
       completed presscal per task, atomically. */
#pragma omp atomic
    pp++;
}

int main(int argc, char *argv[])
{
    int i, r, nthreads;
    double *time, avg_time = 0.0;
    char *str, *endptr;
    float *a;
    double time2 = 0.0;

    /* Discover the thread count of a default parallel region. */
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    if (argc > 1) {
        str = argv[1];
    }
    int ntasks = argc > 1 ? (int)strtoll(str, &endptr, 10) : NUM_TASKS;
    if (ntasks < nthreads)
        ntasks = nthreads;
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;

    time = malloc(sizeof(double) * rep);
    a = malloc(sizeof(float) * ntasks);
    /* FIX: malloc results were used unchecked. */
    if (time == NULL || a == NULL) {
        fprintf(stderr, "allocation failure\n");
        free(time);
        free(a);
        return EXIT_FAILURE;
    }

    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            {
                time2 = omp_get_wtime();
                for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                    {
                        presscal(0.9f, &a[i]);
                    }
                }
                time2 = omp_get_wtime() - time2;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }

    /* Verify every element was scaled exactly once. */
    for (i = 0; i < ntasks; i++) {
        if (a[i] != (i + 100.0f) * 0.9f) {
            printf("error: a[%d]=%2.f expected %2.f\n", i, a[i],
                   (i + 100.0f) * 0.9f);
        }
    }

    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n",
           nthreads, ntasks, avg_time, time2);
    printf("o=%d and it should be %d\n", o, ntasks);
    printf("pp=%d and it should be %d\n", pp, ntasks);

    /* FIX: free the benchmark buffers. */
    free(time);
    free(a);
    return EXIT_SUCCESS;
}
/* ===== interactions.c ===== */
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "interactions.h" #include "cloud_util.h" #include "steric.h" typedef struct _box { int head; } box; struct _ix { double L; double r; int boxdim; box ***boxes; int maxNx; int curNx; ix_pair *pairs; }; // it is possible to use smaller boxes and more complex neighbor patterns #define NUM_BOX_NEIGHBORS 13 int box_neighbors[NUM_BOX_NEIGHBORS][3] = { {-1,-1,-1}, {-1,-1, 0}, {-1,-1,+1}, {-1, 0,-1}, {-1, 0, 0}, {-1, 0,+1}, {-1,+1,-1}, {-1,+1, 0}, {-1,+1,+1}, { 0,-1,-1}, { 0,-1, 0}, { 0,-1,+1}, { 0, 0,-1} }; /* maxNx: _predicted_ maximum number of interactions */ int IXCreate (double L, int boxdim, int maxNx, IX *ix_p) { IX ix; int err; boxdim = 8;//I added this line if (boxdim < 4) /* need at least four boxes in each direction */ { boxdim = 4; } err = safeMALLOC(sizeof (*ix), &ix);CHK(err); ix->L = L; ix->boxdim = boxdim; ix->curNx = 0; ix->maxNx = maxNx; err = safeMALLOC(boxdim * sizeof (box **), &(ix->boxes));CHK(err); for (int i = 0; i < boxdim; i++) { err = safeMALLOC(boxdim * sizeof (box *), &(ix->boxes[i]));CHK(err); for (int j = 0; j < boxdim; j++) { err = safeMALLOC(boxdim * sizeof (box), &(ix->boxes[i][j]));CHK(err); } } err = safeMALLOC(maxNx * sizeof (ix_pair), &(ix->pairs));CHK(err); *ix_p = ix; return (0); } int IXDestroy (IX *ix_p) { int boxdim = (*ix_p)->boxdim; free ((*ix_p)->pairs); #pragma omp parallel for //I changed this line for (int i = 0; i < boxdim; i++) { for (int j = 0; j < boxdim; j++) { free((*ix_p)->boxes[i][j]); } free((*ix_p)->boxes[i]); } free((*ix_p)->boxes); free(*ix_p); *ix_p = NULL; return 0; } static void IXClearPairs(IX ix) { ix->curNx = 0; } static void IXPushPair(IX ix, int p1, int p2) { ix_pair *pair; if (ix->curNx == ix->maxNx) { int maxNx = ix->maxNx * 2; ix_pair *newpairs; safeMALLOC(maxNx * sizeof(ix_pair), &newpairs); memcpy(newpairs, ix->pairs, ix->curNx * sizeof (ix_pair)); free(ix->pairs); ix->pairs = newpairs; ix->maxNx = maxNx; } pair = 
&(ix->pairs[ix->curNx++]); pair->p[0] = p1; pair->p[1] = p2; } int interactions_check(IX ix, Vector X, double r, int Npairs, ix_pair *pairs, int *total) { double L = ix->L; double r2 = r * r; int Np = X->Np; int intcount = 0; for (int i = 0; i < Np; i++) { for (int j = i + 1; j < Np; j++) { double dx, dy, dz; double dist2 = dist_and_disp (IDX(X,0,i),IDX(X,1,i),IDX(X,2,i), IDX(X,0,j),IDX(X,1,j),IDX(X,2,j), L, &dx, &dy, &dz); if (dist2 < r2) { intcount++; int k; for (k = 0; k < Npairs; k++) { if ((pairs[k].p[0] == i && pairs[k].p[1] == j) || (pairs[k].p[0] == j && pairs[k].p[1] == i)) { break; } } if (k == Npairs) { fprintf(stderr,"Pair %d %d not in list\n", i, j); return 1; } } } } *total = intcount; return 0; } int IXGetPairs(IX ix, Vector X, double r, int *Npairs, ix_pair **pairs) { int boxdim = ix->boxdim; double L = ix->L; double boxwidth = L / boxdim; double cutoff2 = r * r; box ***b = ix->boxes; int Np = X->Np; int err; if (r > boxwidth) { printf("interactions: radius %g is greater than box width %g\n", r, boxwidth); return 1; } // box indices int idx, idy, idz; int neigh_idx, neigh_idy, neigh_idz; int *next; box *bp, *neigh_bp; #pragma omp parallel for// I added this line for (int i = 0; i < boxdim; i++) { for (int j = 0; j < boxdim; j++) { for (int k = 0; k < boxdim; k++) { b[i][j][k].head = -1; } } } err = safeMALLOC(Np * sizeof(int), &next);CHK(err); // traverse all particles and assign to boxes for (int i=0; i<Np; i++) { double pos_p[3]; // initialize entry of implied linked list next[i] = -1; // get the periodic coordinates in [0,L) for (int j = 0; j < 3; j++) { //printf("%g\n",IDX(X,j,i)); pos_p[j] = remainder(IDX(X,j,i),L) + L/2.; } // which box does the particle belong to? 
idx = (int)(pos_p[0]/L*boxdim); idy = (int)(pos_p[1]/L*boxdim); idz = (int)(pos_p[2]/L*boxdim); // add to beginning of implied linked list bp = &b[idx][idy][idz]; next[i] = bp->head; bp->head = i; } int p1, p2; double d2, dx, dy, dz; IXClearPairs(ix); #pragma omp parallel for collapse(3) default(shared) private (p1,p2,d2,dx,dy,dz,idx,idy,idz,bp,neigh_idx,neigh_idy,neigh_idz,neigh_bp) for (idx=0; idx<boxdim; idx++) { for (idy=0; idy<boxdim; idy++) { for (idz=0; idz<boxdim; idz++) { bp = &b[idx][idy][idz]; // within box interactions p1 = bp->head; while (p1 != -1) { p2 = next[p1]; while (p2 != -1) { d2 = dist_and_disp(IDX(X,0,p1),IDX(X,1,p1),IDX(X,2,p1), IDX(X,0,p2),IDX(X,1,p2),IDX(X,2,p2), L, &dx, &dy, &dz); if (d2 < cutoff2) { #pragma omp critical //I added this line to protect updating IXPushPair(ix,p1,p2); } p2 = next[p2]; } p1 = next[p1]; } // interactions with other boxes for (int j=0; j<NUM_BOX_NEIGHBORS; j++) { neigh_idx = (idx + box_neighbors[j][0] + boxdim) % boxdim; neigh_idy = (idy + box_neighbors[j][1] + boxdim) % boxdim; neigh_idz = (idz + box_neighbors[j][2] + boxdim) % boxdim; neigh_bp = &b[neigh_idx][neigh_idy][neigh_idz]; p1 = neigh_bp->head; while (p1 != -1) { p2 = bp->head; while (p2 != -1) { d2 = dist_and_disp(IDX(X,0,p1),IDX(X,1,p1),IDX(X,2,p1), IDX(X,0,p2),IDX(X,1,p2),IDX(X,2,p2), L, &dx, &dy, &dz); if (d2 < cutoff2) { #pragma omp critical //I added this line to protect updating IXPushPair(ix,p1,p2); } p2 = next[p2]; } p1 = next[p1]; } } } } } free(next); *Npairs = ix->curNx; *pairs = ix->pairs; #if DEBUG { int NpairsCheck; err = interactions_check(ix,X,r,*Npairs,*pairs,&NpairsCheck);CHK(err); printf("Npairs %d, NpairsCheck %d\n", *Npairs, NpairsCheck); if (*Npairs != NpairsCheck) { fprintf (stderr, "Interaction count mismatch, %d != %d\n", *Npairs, NpairsCheck); } } #endif return 0; } int IXRestorePairs(IX ix, Vector X, double r, int *Npairs, ix_pair **pairs) { *Npairs = 0; *pairs = NULL; return 0; }
7549.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp for (i = 0; i < _PB_N; i++) { #pragma omp for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
ast-dump-openmp-target-update.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(int x) { #pragma omp target update to(x) } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-update.c:3:1, line:5:1> line:3:6 test 'void (int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used x 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:5:1> // CHECK-NEXT: `-OMPTargetUpdateDirective {{.*}} <line:4:9, col:32> openmp_standalone_directive // CHECK-NEXT: |-OMPToClause {{.*}} <col:27, col:31> // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <col:9> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-CompoundStmt {{.*}} <col:9> // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-update.c:4:9) *const restrict'
yolov2_forward_network.c
#include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h // softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h #define GEMMCONV /* // from: box.h typedef struct { float x, y, w, h; } box; */ // binary transpose size_t binary_transpose_align_input(int k, int n, float *b, char **t_bit_input, size_t ldb_align, int bit_align) { size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; size_t t_intput_size = new_ldb * n; size_t t_bit_input_size = t_intput_size / 8;// +1; *t_bit_input = calloc(t_bit_input_size, sizeof(char)); //printf("\n t_bit_input_size = %d, k = %d, n = %d, new_ldb = %d \n", t_bit_input_size, k, n, new_ldb); int src_size = k * bit_align; transpose_bin(b, *t_bit_input, k, n, bit_align, new_ldb, 8); return t_intput_size; } // 4 layers in 1: convolution, batch-normalization, BIAS and activation void forward_convolutional_layer_cpu(layer l, network_state state) { int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1 int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width for stride=1 and pad=1 int i, f, j; // fill zero (ALPHA) for (i = 0; i < l.outputs; ++i) l.output[i] = 0; if (l.xnor) { if (!l.align_bit_weights) { binarize_weights(l.weights, l.n, l.c*l.size*l.size, l.binary_weights); //printf("\n binarize_weights l.align_bit_weights = %p \n", l.align_bit_weights); } binarize_cpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input); l.weights = l.binary_weights; state.input = l.binary_input; } // l.n - number of filters on this layer // l.c - channels of input-array // l.h - height of input-array // l.w - width of input-array // l.size - width and height of filters (the same size for all filters) // 1. Convolution !!! 
#ifndef GEMMCONV int fil; // filter index #pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP for (fil = 0; fil < l.n; ++fil) { int chan, y, x, f_y, f_x; // channel index for (chan = 0; chan < l.c; ++chan) // input - y for (y = 0; y < l.h; ++y) // input - x for (x = 0; x < l.w; ++x) { int const output_index = fil*l.w*l.h + y*l.w + x; int const weights_pre_index = fil*l.c*l.size*l.size + chan*l.size*l.size; int const input_pre_index = chan*l.w*l.h; float sum = 0; // filter - y for (f_y = 0; f_y < l.size; ++f_y) { int input_y = y + f_y - l.pad; // filter - x for (f_x = 0; f_x < l.size; ++f_x) { int input_x = x + f_x - l.pad; if (input_y < 0 || input_x < 0 || input_y >= l.h || input_x >= l.w) continue; int input_index = input_pre_index + input_y*l.w + input_x; int weights_index = weights_pre_index + f_y*l.size + f_x; sum += state.input[input_index] * l.weights[weights_index]; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; l.output[output_index] += sum; } } #else int m = l.n; int k = l.size*l.size*l.c; int n = out_h*out_w; float *a = l.weights; float *b = state.workspace; float *c = l.output; // convolution as GEMM (as part of BLAS) for (i = 0; i < l.batch; ++i) { //im2col_cpu(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b); // im2col.c //im2col_cpu_custom(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b); // AVX2 // XNOR-net - bit-1: weights, input, calculation if (l.xnor && (l.stride == 1 && l.pad == 1)) { memset(b, 0, l.bit_align*l.size*l.size*l.c * sizeof(float)); //im2col_cpu_custom_align(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b, l.bit_align); im2col_cpu_custom_bin(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b, l.bit_align); int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); char *t_bit_input = NULL; size_t t_intput_size = binary_transpose_align_input(k, n, 
b, &t_bit_input, ldb_align, l.bit_align); // 5x times faster than gemm()-float32 gemm_nn_custom_bin_mean_transposed(m, n, k, 1, l.align_bit_weights, new_ldb, t_bit_input, new_ldb, c, n, l.mean_arr); //gemm_nn_custom_bin_mean_transposed(m, n, k, 1, bit_weights, k, t_bit_input, new_ldb, c, n, mean_arr); //free(t_input); free(t_bit_input); } else { im2col_cpu_custom(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b); // AVX2 int t; #pragma omp parallel for for (t = 0; t < m; ++t) { gemm_nn(1, n, k, 1, a + t*k, k, b, n, c + t*n, n); } } c += n*m; state.input += l.c*l.h*l.w; } #endif int const out_size = out_h*out_w; // 2. Batch normalization if (l.batch_normalize) { for (f = 0; f < l.out_c; ++f) { for (i = 0; i < out_size; ++i) { int index = f*out_size + i; l.output[index] = (l.output[index] - l.rolling_mean[f]) / (sqrtf(l.rolling_variance[f]) + .000001f); } } // scale_bias for (i = 0; i < l.out_c; ++i) { for (j = 0; j < out_size; ++j) { l.output[i*out_size + j] *= l.scales[i]; } } } // 3. Add BIAS //if (l.batch_normalize) for (i = 0; i < l.n; ++i) { for (j = 0; j < out_size; ++j) { l.output[i*out_size + j] += l.biases[i]; } } // 4. 
Activation function (LEAKY or LINEAR) //if (l.activation == LEAKY) { // for (i = 0; i < l.n*out_size; ++i) { // l.output[i] = leaky_activate(l.output[i]); // } //} activate_array_cpu_custom(l.output, l.n*out_size, l.activation); } // MAX pooling layer void forward_maxpool_layer_cpu(const layer l, network_state state) { if (!state.train) { forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch); return; } int b, i, j, k, m, n; int w_offset = -l.pad; int h_offset = -l.pad; int h = l.out_h; int w = l.out_w; int c = l.c; // batch index for (b = 0; b < l.batch; ++b) { // channel index for (k = 0; k < c; ++k) { // y - input for (i = 0; i < h; ++i) { // x - input for (j = 0; j < w; ++j) { int out_index = j + w*(i + h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; // pooling x-index for (n = 0; n < l.size; ++n) { // pooling y-index for (m = 0; m < l.size; ++m) { int cur_h = h_offset + i*l.stride + n; int cur_w = w_offset + j*l.stride + m; int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c)); int valid = (cur_h >= 0 && cur_h < l.h && cur_w >= 0 && cur_w < l.w); float val = (valid != 0) ? state.input[index] : -FLT_MAX; max_i = (val > max) ? index : max_i; // get max index max = (val > max) ? 
val : max; // get max value } } l.output[out_index] = max; // store max value l.indexes[out_index] = max_i; // store max index } } } } } // Route layer - just copy 1 or more layers into the current layer void forward_route_layer_cpu(const layer l, network_state state) { int i, j; int offset = 0; // number of merged layers for (i = 0; i < l.n; ++i) { int index = l.input_layers[i]; // source layer index float *input = state.net.layers[index].output; // source layer output ptr int input_size = l.input_sizes[i]; // source layer size // batch index for (j = 0; j < l.batch; ++j) { memcpy(l.output + offset + j*l.outputs, input + j*input_size, input_size * sizeof(float)); } offset += input_size; } } // Reorg layer - just change dimension sizes of the previous layer (some dimension sizes are increased by decreasing other) void forward_reorg_layer_cpu(const layer l, network_state state) { float *out = l.output; float *x = state.input; int out_w = l.out_w; int out_h = l.out_h; int out_c = l.out_c; int batch = l.batch; int stride = l.stride; int b, i, j, k; int in_c = out_c / (stride*stride); //printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward); //printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride); // batch for (b = 0; b < batch; ++b) { // channel for (k = 0; k < out_c; ++k) { // y for (j = 0; j < out_h; ++j) { // x for (i = 0; i < out_w; ++i) { int in_index = i + out_w*(j + out_h*(k + out_c*b)); int c2 = k % in_c; int offset = k / in_c; int w2 = i*stride + offset % stride; int h2 = j*stride + offset / stride; int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b)); out[in_index] = x[out_index]; } } } } } // ---- upsample layer ---- // upsample_layer.c void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { int i, j, k, b; for (b = 0; b < batch; ++b) { for (k = 0; k < c; ++k) { for (j = 0; j < h*stride; ++j) { for (i 
= 0; i < w*stride; ++i) { int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride; int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i; if (forward) out[out_index] = scale*in[in_index]; else in[in_index] += scale*out[out_index]; } } } } } // upsample_layer.c void forward_upsample_layer_cpu(const layer l, network_state net) { fill_cpu(l.outputs*l.batch, 0, l.output, 1); if (l.reverse) { upsample_cpu(l.output, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input); } else { upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output); } } // blas.c (shortcut_layer) void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int stride = w1 / w2; int sample = w2 / w1; assert(stride == h1 / h2); assert(sample == h2 / h1); if (stride < 1) stride = 1; if (sample < 1) sample = 1; int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; int i, j, k, b; for (b = 0; b < batch; ++b) { for (k = 0; k < minc; ++k) { for (j = 0; j < minh; ++j) { for (i = 0; i < minw; ++i) { int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] += add[add_index]; } } } } } // blas.c void copy_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for (i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX]; } // shortcut_layer.c void forward_shortcut_layer_cpu(const layer l, network_state state) { copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1); shortcut_cpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.output); activate_array(l.output, l.outputs*l.batch, l.activation); } // ---- yolo layer ---- void forward_yolo_layer_cpu(const layer l, network_state state) { int i, j, b, t, n; memcpy(l.output, state.input, l.outputs*l.batch * sizeof(float)); #ifndef GPU for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { int index = entry_index(l, b, 
n*l.w*l.h, 0); activate_array(l.output + index, 2 * l.w*l.h, LOGISTIC); index = entry_index(l, b, n*l.w*l.h, 4); activate_array(l.output + index, (1 + l.classes)*l.w*l.h, LOGISTIC); } } #endif //memset(l.delta, 0, l.outputs * l.batch * sizeof(float)); } // ---- region layer ---- static void softmax_cpu(float *input, int n, float temp, float *output) { int i; float sum = 0; float largest = -FLT_MAX; for (i = 0; i < n; ++i) { if (input[i] > largest) largest = input[i]; } for (i = 0; i < n; ++i) { float e = expf(input[i] / temp - largest / temp); sum += e; output[i] = e; } for (i = 0; i < n; ++i) { output[i] /= sum; } } static void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output) { int b; for (b = 0; b < batch; ++b) { int i; int count = 0; for (i = 0; i < hierarchy->groups; ++i) { int group_size = hierarchy->group_size[i]; softmax_cpu(input + b*inputs + count, group_size, temp, output + b*inputs + count); count += group_size; } } } // --- // Region layer - just change places of array items, then do logistic_activate and softmax void forward_region_layer_cpu(const layer l, network_state state) { int i, b; int size = l.coords + l.classes + 1; // 4 Coords(x,y,w,h) + Classes + 1 Probability-t0 memcpy(l.output, state.input, l.outputs*l.batch * sizeof(float)); //flatten(l.output, l.w*l.h, size*l.n, l.batch, 1); // convert many channels to the one channel (depth=1) // (each grid cell will have a number of float-variables equal = to the initial number of channels) { float *x = l.output; int layer_size = l.w*l.h; // W x H - size of layer int layers = size*l.n; // number of channels (where l.n = number of anchors) int batch = l.batch; float *swap = calloc(layer_size*layers*batch, sizeof(float)); int i, c, b; // batch index for (b = 0; b < batch; ++b) { // channel index for (c = 0; c < layers; ++c) { // layer grid index for (i = 0; i < layer_size; ++i) { int i1 = b*layers*layer_size + c*layer_size + i; int i2 = b*layers*layer_size + 
i*layers + c; swap[i2] = x[i1]; } } } memcpy(x, swap, layer_size*layers*batch * sizeof(float)); free(swap); } // logistic activation only for: t0 (where is t0 = Probability * IoU(box, object)) for (b = 0; b < l.batch; ++b) { // for each item (x, y, anchor-index) for (i = 0; i < l.h*l.w*l.n; ++i) { int index = size*i + b*l.outputs; float x = l.output[index + 4]; l.output[index + 4] = 1.0F / (1.0F + expf(-x)); // logistic_activate_cpu(l.output[index + 4]); } } if (l.softmax_tree) { // Yolo 9000 for (b = 0; b < l.batch; ++b) { for (i = 0; i < l.h*l.w*l.n; ++i) { int index = size*i + b*l.outputs; softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5); } } } else if (l.softmax) { // Yolo v2 // softmax activation only for Classes probability for (b = 0; b < l.batch; ++b) { // for each item (x, y, anchor-index) for (i = 0; i < l.h*l.w*l.n; ++i) { int index = size*i + b*l.outputs; softmax_cpu(l.output + index + 5, l.classes, 1, l.output + index + 5); } } } } void yolov2_forward_network_cpu(network net, network_state state) { state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { forward_convolutional_layer_cpu(l, state); //printf("\n CONVOLUTIONAL \t\t l.size = %d \n", l.size); } else if (l.type == MAXPOOL) { forward_maxpool_layer_cpu(l, state); //printf("\n MAXPOOL \t\t l.size = %d \n", l.size); } else if (l.type == ROUTE) { forward_route_layer_cpu(l, state); //printf("\n ROUTE \t\t\t l.n = %d \n", l.n); } else if (l.type == REORG) { forward_reorg_layer_cpu(l, state); //printf("\n REORG \n"); } else if (l.type == UPSAMPLE) { forward_upsample_layer_cpu(l, state); //printf("\n UPSAMPLE \n"); } else if (l.type == SHORTCUT) { forward_shortcut_layer_cpu(l, state); //printf("\n SHORTCUT \n"); } else if (l.type == YOLO) { forward_yolo_layer_cpu(l, state); //printf("\n YOLO \n"); } else if (l.type == REGION) { forward_region_layer_cpu(l, state); //printf("\n REGION 
\n"); } else { printf("\n layer: %d \n", l.type); } state.input = l.output; } } // detect on CPU float *network_predict_cpu(network net, float *input) { network_state state; state.net = net; state.index = 0; state.input = input; state.truth = 0; state.train = 0; state.delta = 0; yolov2_forward_network_cpu(net, state); // network on CPU //float *out = get_network_output(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return net.layers[i].output; } // -------------------- // x - last conv-layer output // biases - anchors from cfg-file // n - number of anchors from cfg-file box get_region_box_cpu(float *x, float *biases, int n, int index, int i, int j, int w, int h) { box b; b.x = (i + logistic_activate(x[index + 0])) / w; // (col + 1./(1. + exp(-x))) / width_last_layer b.y = (j + logistic_activate(x[index + 1])) / h; // (row + 1./(1. + exp(-x))) / height_last_layer b.w = expf(x[index + 2]) * biases[2 * n] / w; // exp(x) * anchor_w / width_last_layer b.h = expf(x[index + 3]) * biases[2 * n + 1] / h; // exp(x) * anchor_h / height_last_layer return b; } // get prediction boxes void get_region_boxes_cpu(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map) { int i, j, n; float *predictions = l.output; // grid index for (i = 0; i < l.w*l.h; ++i) { int row = i / l.w; int col = i % l.w; // anchor index for (n = 0; n < l.n; ++n) { int index = i*l.n + n; // index for each grid-cell & anchor int p_index = index * (l.classes + 5) + 4; float scale = predictions[p_index]; // scale = t0 = Probability * IoU(box, object) if (l.classfix == -1 && scale < .5) scale = 0; // if(t0 < 0.5) t0 = 0; int box_index = index * (l.classes + 5); boxes[index] = get_region_box_cpu(predictions, l.biases, n, box_index, col, row, l.w, l.h); boxes[index].x *= w; boxes[index].y *= h; boxes[index].w *= w; boxes[index].h *= h; int class_index = index * (l.classes + 5) + 5; // Yolo 9000 or Yolo v2 if (l.softmax_tree) { // Yolo 9000 
hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0); int found = 0; if (map) { for (j = 0; j < 200; ++j) { float prob = scale*predictions[class_index + map[j]]; probs[index][j] = (prob > thresh) ? prob : 0; } } else { for (j = l.classes - 1; j >= 0; --j) { if (!found && predictions[class_index + j] > .5) { found = 1; } else { predictions[class_index + j] = 0; } float prob = predictions[class_index + j]; probs[index][j] = (scale > thresh) ? prob : 0; } } } else { // Yolo v2 for (j = 0; j < l.classes; ++j) { float prob = scale*predictions[class_index + j]; // prob = IoU(box, object) = t0 * class-probability probs[index][j] = (prob > thresh) ? prob : 0; // if (IoU < threshold) IoU = 0; } } if (only_objectness) { probs[index][0] = scale; } } } } // ------ Calibration -------- // detect on CPU float *network_calibrate_cpu(network net, float *input) { network_state state; state.net = net; state.index = 0; state.input = input; state.truth = 0; state.train = 0; state.delta = 0; //yolov2_forward_network_cpu(net, state); // network on CPU // input calibration - for quantinization static int max_num = 100; static int counter = 0; static float *input_mult_array = NULL; if (net.do_input_calibration > 0) { // calibration for quantinization max_num = net.do_input_calibration; if (input_mult_array == NULL) { input_mult_array = (float *)calloc(net.n * max_num, sizeof(float)); } ++counter; // save calibration coefficients if (counter > max_num) { printf("\n\n Saving coefficients to the input_calibration.txt file... 
\n\n"); FILE* fw = fopen("input_calibration.txt", "wb"); char buff[1024]; //printf("\n float input_mult[] = { "); char *str1 = "input_calibration = "; printf("%s", str1); fwrite(str1, sizeof(char), strlen(str1), fw); int i; for (i = 0; i < net.n; ++i) if (net.layers[i].type == CONVOLUTIONAL) { printf("%g, ", input_mult_array[0 + i*max_num]); sprintf(buff, "%g, ", input_mult_array[0 + i*max_num]); fwrite(buff, sizeof(char), strlen(buff), fw); } char *str2 = "16"; printf("%s \n ---------------------------", str2); fwrite(str2, sizeof(char), strlen(str2), fw); fclose(fw); getchar(); exit(0); } } state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { if (net.do_input_calibration) { // calibration for quantinization //float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 8192, 2048); float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 16, 4096); //float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 4, 2*4096); printf(" multiplier = %f, l.inputs = %d \n\n", multiplier, l.inputs); input_mult_array[counter + i*max_num] = multiplier; if (counter >= max_num) { int j; float res_mult = 0; for (j = 0; j < max_num; ++j) res_mult += input_mult_array[j + i*max_num]; res_mult = res_mult / max_num; input_mult_array[0 + i*max_num] = res_mult; printf(" res_mult = %f, max_num = %d \n", res_mult, max_num); } } forward_convolutional_layer_cpu(l, state); //printf("\n CONVOLUTIONAL \t\t l.size = %d \n", l.size); } else if (l.type == MAXPOOL) { forward_maxpool_layer_cpu(l, state); //printf("\n MAXPOOL \t\t l.size = %d \n", l.size); } else if (l.type == ROUTE) { forward_route_layer_cpu(l, state); //printf("\n ROUTE \t\t\t l.n = %d \n", l.n); } else if (l.type == REORG) { forward_reorg_layer_cpu(l, state); //printf("\n REORG \n"); } else if (l.type == REGION) { forward_region_layer_cpu(l, state); //printf("\n REGION \n"); } else { printf("\n layer: %d \n", 
l.type); } state.input = l.output; } //int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return net.layers[i].output; }
DRB074-flush-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
  This benchmark is extracted from flush_nolist.1c of OpenMP Application
  Programming Interface Examples Version 4.5.0 . We added one critical
  section to make it a test with only one pair of data races. The data race
  will not generate wrong result though. So the assertion always passes.
  Data race pair: i@70:10 vs. i@71:11
*/
#include<stdio.h>
#include<assert.h>

/* Write 1 through q under a critical section, then flush.
   NOTE(review): the critical section serializes the writes, but the read of
   i in main() below is unsynchronized, so the write/read pair still races —
   this is the benchmark's intended (benign) data race; do not "fix" it. */
void f1(int *q)
{
#pragma omp critical
  *q = 1;
  /* flush with no list: make this thread's view of memory consistent */
#pragma omp flush
}

int main()
{
  int i=0, sum=0;
  /* 10 threads each set i to 1 via f1() and then read i into the
     reduction variable sum; the read of i races with other threads'
     writes in f1(). */
#pragma omp parallel reduction(+:sum) num_threads(10)
  {
    f1(&i);
    sum+=i;  /* racing read of i (benign: every racing write stores 1) */
  }
  /* Each thread reads i only after writing 1, so sum is always 10
     despite the race. */
  assert (sum==10);
  printf("sum=%d\n", sum);
  return 0;
}
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelFxImage() applies a channel expression to the specified image. The % expression consists of one or more channels, either mnemonic or numeric (e.g. % red, 1), separated by actions as follows: % % <=> exchange two channels (e.g. red<=>blue) % => copy one channel to another channel (e.g. red=>green) % = assign a constant value to a channel (e.g. red=50%) % , write new image channels in the specified order (e.g. red, green) % | add a new output image for the next set of channel operations % ; move to the next input image for the source of channel data % % For example, to create 3 grayscale images from the red, green, and blue % channels of an image, use: % % -channel-fx "red; green; blue" % % A channel without an operation symbol implies separate (i.e, semicolon). % % The format of the ChannelFxImage method is: % % Image *ChannelFxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A channel expression. 
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Channel operations recognized by the -channel-fx expression parser.
*/
typedef enum
{
  ExtractChannelOp,   /* bare channel name: separate it out */
  AssignChannelOp,    /* '=' : assign a constant value */
  ExchangeChannelOp,  /* '<=>' : swap two channels */
  TransferChannelOp   /* '=>' : copy one channel to another */
} ChannelFx;

/*
  ChannelImage() copies (or, for AssignChannelOp, assigns the constant
  `pixel` to) `source_channel` of source_image into `destination_channel`
  of destination_image, row by row, over the overlapping region of the two
  images.  Returns MagickTrue on success.
*/
static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  size_t
    height,
    width;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source_image,exception);
  destination_view=AcquireAuthenticCacheView(destination_image,exception);
  /* operate only over the region both images cover */
  height=MagickMin(source_image->rows,destination_image->rows);
  width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelTrait
      destination_traits,
      source_traits;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(destination_view,0,y,
      destination_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    destination_traits=GetPixelChannelTraits(destination_image,
      destination_channel);
    source_traits=GetPixelChannelTraits(source_image,source_channel);
    if ((destination_traits == UndefinedPixelTrait) ||
        (source_traits == UndefinedPixelTrait))
      continue;
    for (x=0; x < (ssize_t) width; x++)
    {
      if (channel_op == AssignChannelOp)
        SetPixelChannel(destination_image,destination_channel,pixel,q);
      else
        SetPixelChannel(destination_image,destination_channel,
          GetPixelChannel(source_image,source_channel,p),q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

/*
  ChannelFxImage() parses `expression` (see the method documentation above)
  and applies each channel operation in turn, producing one or more new
  images.  Returns the first image of the resulting list, or NULL on error.
*/
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag  "ChannelFx/Image"

  ChannelFx
    channel_op;

  ChannelType
    channel_mask;

  char
    token[MagickPathExtent];

  const char
    *p;

  const Image
    *source_image;

  double
    pixel;

  Image
    *destination_image;

  MagickBooleanType
    status;

  PixelChannel
    source_channel,
    destination_channel;

  ssize_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  if (expression == (const char *) NULL)
    return(destination_image);
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      /* NOTE(review): this path returns NULL without destroying
         destination_image — looks like a leak; confirm against upstream. */
      destination_image=GetLastImageInList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;

    /*
      Interpret channel expression.
    */
    switch (*token)
    {
      case ',':
      {
        /* separator between channels of the same output image */
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case '|':
      {
        /* advance to the next input image (wrap to the first) */
        if (GetNextImageInList(source_image) != (Image *) NULL)
          source_image=GetNextImageInList(source_image);
        else
          source_image=GetFirstImageInList(source_image);
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case ';':
      {
        /* finish the current output image and start a new one */
        Image
          *canvas;

        (void) SetPixelChannelMask(destination_image,channel_mask);
        if ((channel_op == ExtractChannelOp) && (channels == 1))
          {
            (void) SetPixelMetaChannels(destination_image,0,exception);
            (void) SetImageColorspace(destination_image,GRAYColorspace,
              exception);
          }
        canvas=CloneImage(source_image,0,0,MagickTrue,exception);
        if (canvas == (Image *) NULL)
          {
            destination_image=DestroyImageList(destination_image);
            return(destination_image);
          }
        AppendImageToList(&destination_image,canvas);
        destination_image=GetLastImageInList(destination_image);
        status=SetImageStorageClass(destination_image,DirectClass,exception);
        if (status == MagickFalse)
          {
            destination_image=GetLastImageInList(destination_image);
            return((Image *) NULL);
          }
        GetNextToken(p,&p,MagickPathExtent,token);
        channels=0;
        destination_channel=RedPixelChannel;
        channel_mask=UndefinedChannel;
        break;
      }
      default:
        break;
    }
    i=ParsePixelChannelOption(token);
    if (i < 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedChannelType","`%s'",token);
        destination_image=DestroyImageList(destination_image);
        return(destination_image);
      }
    source_channel=(PixelChannel) i;
    channel_op=ExtractChannelOp;
    GetNextToken(p,&p,MagickPathExtent,token);
    /* recognize the operator following the channel name:
       '<' begins '<=>' (exchange); '=' alone assigns; '=>' transfers */
    if (*token == '<')
      {
        channel_op=ExchangeChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '=')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=AssignChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '>')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=TransferChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    switch (channel_op)
    {
      case AssignChannelOp:
      case ExchangeChannelOp:
      case TransferChannelOp:
      {
        if (channel_op == AssignChannelOp)
          pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
        else
          {
            i=ParsePixelChannelOption(token);
            if (i < 0)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"UnrecognizedChannelType","`%s'",token);
                destination_image=DestroyImageList(destination_image);
                return(destination_image);
              }
          }
        destination_channel=(PixelChannel) i;
        if (i >= (ssize_t) GetPixelChannels(destination_image))
          (void) SetPixelMetaChannels(destination_image,(size_t) (
            destination_channel-GetPixelChannels(destination_image)+1),
            exception);
        if (image->colorspace != UndefinedColorspace)
          switch (destination_channel)
          {
            case RedPixelChannel:
            case GreenPixelChannel:
            case BluePixelChannel:
            case BlackPixelChannel:
            case IndexPixelChannel:
              break;
            case AlphaPixelChannel:
            {
              destination_image->alpha_trait=BlendPixelTrait;
              break;
            }
            case CompositeMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | CompositeMaskChannel);
              break;
            }
            case ReadMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | ReadMaskChannel);
              break;
            }
            case WriteMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | WriteMaskChannel);
              break;
            }
            case MetaPixelChannel:
            default:
            {
              (void) SetPixelMetaChannels(destination_image,(size_t) (
                destination_channel-GetPixelChannels(destination_image)+1),
                exception);
              break;
            }
          }
        channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
        if (((channels >= 1) || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        /* second half of the swap: copy the other direction too */
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        channel_mask=(ChannelType) (channel_mask |
          (1UL << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m b i n e I m a g e s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CombineImages() combines one or more images into a single image.  The
%  grayscale value of the pixels of each image in the sequence is assigned in
%  order to the specified channels of the combined image.   The typical
%  ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
%  The format of the CombineImages method is:
%
%      Image *CombineImages(const Image *images,const ColorspaceType colorspace,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o colorspace: the image colorspace.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *CombineImages(const Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse) { combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (colorspace != UndefinedColorspace) (void) SetImageColorspace(combine_image,colorspace,exception); else if (fabs(image->gamma-1.0) <= MagickEpsilon) (void) SetImageColorspace(combine_image,RGBColorspace,exception); else (void) SetImageColorspace(combine_image,sRGBColorspace,exception); switch (combine_image->colorspace) { case UndefinedColorspace: case sRGBColorspace: { if (GetImageListLength(image) > 3) combine_image->alpha_trait=BlendPixelTrait; break; } case LinearGRAYColorspace: case GRAYColorspace: { if (GetImageListLength(image) > 1) combine_image->alpha_trait=BlendPixelTrait; break; } case CMYKColorspace: { if (GetImageListLength(image) > 4) combine_image->alpha_trait=BlendPixelTrait; break; } default: break; } /* Combine images. 
*/ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; Quantum *pixels; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t i; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } next=image; for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++) { register ssize_t x; PixelChannel channel = GetPixelChannelChannel(combine_image,i); PixelTrait traits = GetPixelChannelTraits(combine_image,channel); if (traits == UndefinedPixelTrait) continue; if (next == (Image *) NULL) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const Quantum *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { if (x < (ssize_t) next->columns) { q[i]=GetPixelGray(next,p); p+=GetPixelChannels(next); } q+=GetPixelChannels(combine_image); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CombineImageTag,progress, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
%  not activated.  That is, the image is RGB rather than RGBA or CMYK rather
%  than CMYKA.
%
%  The format of the GetImageAlphaChannel method is:
%
%      MagickBooleanType GetImageAlphaChannel(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* any alpha trait other than Undefined means the channel is active */
  return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e p a r a t e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImage() separates a channel from the image and returns it as a
%  grayscale image.
%
%  The format of the SeparateImage method is:
%
%      Image *SeparateImage(const Image *image,const ChannelType channel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the image channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit)  (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag  "Separate/Image"

  CacheView
    *image_view,
    *separate_view;

  Image
    *separate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  /* the result is a plain grayscale image with no alpha */
  separate_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->gamma=image->gamma;
  /*
    Separate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* default to black, then overwrite with the last channel selected
         by channel_type (selection is a bitmask test per channel) */
      SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (GetChannelBit(channel_type,channel) == 0))
          continue;
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e p a r a t e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImages() returns a separate grayscale image for each channel
%  specified.
%
%  The format of the SeparateImages method is:
%
%      Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  register ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  /* one grayscale image per updatable channel of the source image */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) ||
        ((traits & UpdatePixelTrait) == 0))
      continue;
    separate_image=SeparateImage(image,(ChannelType) (1UL << channel),
      exception);
    if (separate_image != (Image *) NULL)
      AppendImageToList(&images,separate_image);
  }
  /* fall back to an undefined-channel separation if nothing was selected */
  if (images == (Image *) NULL)
    images=SeparateImage(image,UndefinedChannel,exception);
  return(images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e A l p h a C h a n n e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
%  channel.
%
%  The format of the SetImageAlphaChannel method is:
%
%      MagickBooleanType SetImageAlphaChannel(Image *image,
%        const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha_type:  The alpha channel type: ActivateAlphaChannel,
%      AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
%      DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
%      OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
%      and TransparentAlphaChannel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  FlattenPixelInfo() composes pixel p (alpha `alpha`) over pixel q (alpha
  `beta`) using over-composition and writes the result into `composite`
  (which may alias q).
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  register ssize_t
    i;

  /*
    Compose pixel p over pixel q with the given alpha.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta,
  gamma=Sa*(-Da)+Sa+Da;
  /* avoid division by zero when the combined alpha vanishes */
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}

/*
  SetImageAlphaChannel() — see the method documentation above.  Note that
  the Associate/Background/Disassociate cases return directly after their
  pixel loop, while the others fall through to the common channel-mask
  sync at the bottom.
*/
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: premultiply every updatable channel by alpha.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          register ssize_t
            i;

          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              /* replace color but keep the pixel fully transparent */
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /* derive alpha from the pixel intensity */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha: divide premultiplied channels back out.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          register ssize_t
            i;

          Sa=QuantumScale*GetPixelAlpha(image,q);
          /* safe reciprocal: fully transparent pixels divide by ~0 */
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency: flatten each pixel over the background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      /*
        Set alpha channel by shape.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=UpdatePixelTrait;
      (void) SetImageMask(image,WritePixelMask,image,exception);
      (void) LevelImageColors(image,&image->background_color,
        &image->background_color,MagickTrue,exception);
      (void) SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageAlpha(image,TransparentAlpha,exception);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
preprocess.c
#include<stdlib.h> #include "graph.h" #include "mainFunctions.h" #include "print.h" //#include "powerperformacetracking.h" //#include "communities.h" #include "graphprop.h" #include "nodeIntMap.h" #include <string.h> #define DEBUG_ON int adj = 0; // 1 for reverse adjlist or 0 in adjlist. bool skewed = false; node_t* comm = NULL; typedef struct graphmap { node_t comm; node_t newPos; node_t revPos; } graphmap; typedef struct ClusterDetails { int id; int numNodes; int numEdges; int external; int commDistance; // node_t* nodeList; } ClusterDetails; inline void CopyClusterDetails(ClusterDetails* cd,int src, int dest) { cd[dest].id = cd[src].id; cd[dest].numNodes = cd[src].numNodes; cd[dest].numEdges = cd[src].numEdges; cd[dest].external = cd[src].external; cd[dest].commDistance = cd[src].commDistance; } graph* createPreprocessedGraph(graph *G, graphmap * gm) { /* Create the copy */ graph* newG = createGraph(); bool hasEdgeWeight= false; newG->numNodes = G->numNodes; newG->numEdges = G->numEdges; newG->begin = (edge_t*) malloc (sizeof(edge_t) * (newG->numNodes+1)); #ifdef DEBUG_ON assert(newG->begin != NULL); #endif newG->node_idx = (node_t*) malloc(sizeof(node_t) * newG->numEdges); edge_t edgepos = 0; newG->begin[0] = 0; for(int i=0;i< newG->numNodes; i++) { assert(gm[i].revPos != NIL_NODE); node_t origPos = gm[i].revPos; // reserse the edge list size newG->begin[i+1] = newG->begin[i] + (G->begin[origPos+1] - G->begin[origPos]); edge_t st = newG->begin[i]; edge_t ed = newG->begin[i]; // add edges for(edge_t e = G->begin[origPos]; e < G->begin[origPos+1]; e++) { node_t end = G->node_idx[e]; assert(gm[end].newPos != NIL_NODE); // get from map the new node id node_t newEnd = gm[end].newPos; edge_t sorte = st; // find right position for(; sorte < ed ; sorte++) { if(newEnd < newG->node_idx[sorte]) break; } // shift edges for(edge_t sf = ed-1; sf >= sorte; sf --) { newG->node_idx[sf+1] = newG->node_idx[sf]; } // add to right position newG->node_idx[sorte] = newEnd; ed++; } 
assert(ed == newG->begin[i+1]); } return newG; } /* TODO there can be atmost 1 node with no incomming edges in the community. (The universal source node of the community). If we can somehow find that node (if present, possibly it will the community ID TODO VERIFY AND OPTIMIZE THIS), the we can get away with creating the stacklist and commlist and directly use the G and comm to create the consolidated list. commlist = nodes in community stacklist = stack visited = visited list commid community id */ void * topologicalSort(graph * G, node_t* commlist , node_t* stacklist, int* visited, node_t* comm, node_t* commpos, int commSize, node_t commId) { int sortedSize = 0; stacklist[sortedSize] = commlist[0]; visited[0] = 1; sortedSize++; int curIndex = 0; node_t source; // printf("The start of stack \n"); while(sortedSize < commSize) { if(curIndex == sortedSize) { for(int i =1; i< commSize; i++ ) { if(visited[commpos[commlist[i]]] == 0) { stacklist[sortedSize] = commlist[i]; visited[commpos[commlist[i]]] = 1; sortedSize++; break; } } } assert(curIndex< sortedSize); source = stacklist[curIndex]; curIndex++; for(edge_t e = G->begin[source]; e < G->begin[source+1]; e++) { node_t d = G->node_idx[e]; if(comm[d] == commId && visited[commpos[d]] == 0) { stacklist[sortedSize] = d; visited[commpos[d]] = 1; sortedSize++; } } } // printf("The end of stack \n"); assert(sortedSize == commSize); } void dumpmapping(graph *G, graphmap* gm,const char * filename) { FILE *fp = fopen(filename, "w"); for(node_t i = 0; i < G->numNodes; i++) { fprintf(fp, "%d\t%d\n", i, gm[i].newPos); } } void initCommunities(graph *G) { comm = (node_t*) malloc (G->numNodes * sizeof(node_t)); assert(comm != NULL); } double* coeff; #define cohval(index,arr) arr[index] void merge_serial(node_t* index, double* vals, node_t start, node_t mid, node_t end) { node_t t1 = start; node_t t2 = mid; node_t tmp; node_t temp; node_t w = mid-1; node_t tempqueue[mid - start]; // should we use malloc ? 
node_t tpf = 0; node_t tpb = 0; while(t1 < mid && t2< end) { if(tpf == tpb) { // empty queue if(cohval(index[t1], vals) < cohval(index[t2], vals)) { tempqueue[tpf] = index[t1]; tpf++; index[t1] = index[t2]; t2++; } } else{ if(cohval(tempqueue[tpb], vals) < cohval(index[t2], vals)) { tempqueue[tpf] = index[t1]; tpf++; index[t1] = index[t2]; t2++; } else { tempqueue[tpf] = index[t1]; tpf++; index[t1] = tempqueue[tpb]; tpb++; } } t1++; } if(t1 < mid) { // on highly rare occations // copy rest of the first half to the temp array while(t1 < mid) { tempqueue[tpf] = index[t1]; tpf++; } // now copy back withou comparison t1 array already sorted. while(tpf > tpb ) { index[t1] = tempqueue[tpb]; t1++; tpb++; } } else { while(tpf > tpb ) { if(t2 < end && cohval(tempqueue[tpb], vals) < cohval(index[t2], vals)) { index[t1] = index[t2]; t2++; } else { index[t1] = tempqueue[tpb]; tpb++; } t1++; } } // TODO add assert? assert(tpf == tpb); if(t2 < end) { assert(t1 == (t2-1)); } else { //assert(t2 == end); assert(t1 == end); } // } void merge_parallel(node_t* index1, double* vals, node_t start, node_t mid, node_t end) { // TODO } void sort_selection(node_t* index, double* vals, node_t start, node_t end) { node_t sindex; for(node_t i = start; i< end-1; i++) { sindex = i; double coh = cohval(index[i], vals); for(node_t j = start+1; j<end; j++) { double coj = cohval(index[j], vals); if(coj > coh) { sindex = j; coh = coj; } } node_t temp = index[i]; index[i] = index[sindex]; index[sindex] = temp; } } void sort_serial(node_t* index, double* vals, node_t start, node_t end) { if((end-start) < 1024) { sort_selection(index, vals, start, end); } else { node_t midpoint = (start+end)/2; sort_serial(index, vals, start, midpoint); sort_serial(index, vals, midpoint, end); merge_serial(index, vals, start, midpoint, end); } } void sort_parallel(node_t* index, double* vals, node_t start, node_t end) { if(end - start < 8192) { sort_serial(index, vals, start, end); } else { /* #pragma omp parallel */ /* 
{ */ #pragma omp task sort_parallel(index, vals, start , (start+end)/2); #pragma omp task sort_parallel(index, vals, (start+end)/2 , end); #pragma omp taskwait merge_serial(index, vals, start, (start+end)/2, end); /* } */ } } void sort(node_t* index, double* vals, node_t start, node_t end) { if((end - start) < 8192) { sort_serial(index, vals,start,end); } else { #pragma omp parallel { sort_parallel(index, vals, start, end); } } } /* int comp (const void * elem1, const void * elem2) { */ /* double f = coeff[*((int*)elem1)]; */ /* double s = coeff[*((int*)elem2)]; */ /* if (f > s) return -1; */ /* if (f < s) return 1; */ /* return 0; */ /* } */ graph* preprocess(graph* G, const char* mapfile) { bool finished = false ; finished = true ; double mean = ((double)G->numEdges)/ G->numNodes; double upperlimit = 1.5 * mean; double lowerlimit = mean * 0.5; // int outliers = 0; bool hasEdgeWeight = false; if(G->weights!= NULL) hasEdgeWeight = true; struct timeval start, end; if(adj == 1) { /* reverse edge parallelism */ createReverseEdges(G); edge_t* r_begin = G->r_begin; node_t* r_node_idx = G->r_node_idx; free(G->begin); free(G->node_idx); G->begin = r_begin; G->node_idx = r_node_idx; } gettimeofday(&start, NULL); initCommunities(G); #pragma omp parallel { #pragma omp for for (node_t x = 0; x < G->numNodes; x ++) comm[x] = x ; } int maxItrs = 100; int itrs = 0; do { finished = true ; #pragma omp parallel { nodeIntMap *map; map = NULL; map = initNodeIntMap(map, 32, 0); node_t x0; #pragma omp for schedule(dynamic, PAR_CHUNKSIZE) for (x0 = 0; x0 < G->numNodes; x0 ++) { /* We classify only nodes with more than one edge. The non classified nodes will be pused to last. 
*/ //if((G->begin[x0+1] - G->begin[x0]) > 1) { map = reinitNodeIntMap(map, G->begin[x0+1] - G->begin[x0], 0); for (edge_t y_idx = G->begin[x0];y_idx < G->begin[x0+1] ; y_idx ++) { node_t y = G->node_idx [y_idx]; node_t source; source = comm[y]; changeValue(map, source, 1); } node_t maxVal = mapMaxValueKey(map); if ( comm[x0] != maxVal && maxVal != NIL_NODE) { #pragma omp atomic write comm[x0] = maxVal; finished = false ; } //} } closeNodeIntMap(map); } itrs++; } while ( !finished && maxItrs > itrs); gettimeofday(&end, NULL); printf("Community detection The required time is %f \n",((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000)); gettimeofday(&start, NULL); ClusterDetails* cd = (ClusterDetails*) malloc (G->numNodes * sizeof(ClusterDetails)); graphmap* gm = (graphmap*) malloc (G->numNodes * sizeof(graphmap)); #pragma omp parallel for for(node_t i=0; i<G->numNodes; i++ ) { gm[i].newPos = NIL_NODE; gm[i].revPos = NIL_NODE; cd[i].numNodes = 0; cd[i].numEdges = 0; cd[i].external = 0; cd[i].id = i; //cd[i].nodeList = NULL; } /* Position of the node in the community */ node_t *commpos = (node_t*) malloc (G->numNodes * sizeof(node_t)); #ifdef DEBUG_ON printf("Start the data collection process \n"); /** * IMPORTANT: There is a high probability that in the input graph * the adject nodes are in the same cluster. * As a result, we first compare the previous nodes's * cluster ID. 
**/ #endif for(node_t i=0; i< G->numNodes; i++) { node_t comid = comm[i]; commpos[i] = cd[comid].numNodes; cd[comid].numNodes++; for(edge_t e = G->begin[i]; e < G->begin[i+1]; e++) { node_t end = G->node_idx[e]; if(comm[end] == comid) cd[comid].numEdges++; else cd[comid].external++; } } /* number of communities */ node_t noofComm = 0; for(node_t i=0; i< G->numNodes; i++) { if(cd[i].numNodes > 0) { if(noofComm < i) { CopyClusterDetails(cd, i, noofComm); } #ifdef DEBUG_ON assert(cd[i].numNodes == cd[noofComm].numNodes); // printf("The node id %d numNodes %d i val is %d and i numNodes is %d \n", noofComm, cd[noofComm].numNodes, i , cd[i].numNodes); assert(cd[i].numNodes > 0); // exit(0); #endif noofComm++; } } #ifdef DEBUG_ON printf("The number of clusters is %d \n ", noofComm); /* for(node_t debclusters = 0; debclusters < noofComm; debclusters++) { */ /* printf("The cluster id is %d and number of nodes in cluster is %d \n", cd[debclusters].id, cd[debclusters].numNodes); */ /* } */ #endif gettimeofday(&end, NULL); printf("data collection The required time is %f \n",((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000)); /* The community with highest diffrenece between internal nodes and external nodes per node will be psuhed to start. 
*/ gettimeofday(&start, NULL); /*community positions*/ int* commDetPointers = (int*) malloc (noofComm * sizeof(int)); coeff = (double*) malloc (noofComm * sizeof(double)); #pragma omp parallel for for(node_t i=0; i < noofComm; i++) { #ifdef DEBUG_ON // printf("The node id %d numNodes %d \n",i , cd[i].numNodes); assert(cd[i].numNodes > 0); #endif commDetPointers[i] = i; coeff[i] = ((double)cd[i].numEdges)/cd[i].numNodes; } sort_serial(commDetPointers, coeff, 0, noofComm); free(coeff); gettimeofday(&end, NULL); printf("sorting The required time is %f \n",((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000)); /* Decide on destinations */ gettimeofday(&start, NULL); node_t commEnd = 0; node_t commStart = 0; /* These are the start and end indexes of graph Map */ for(node_t c =0; c<noofComm; c++) { node_t cid = commDetPointers[c]; cd[cid].commDistance = commStart; assert(cd[cid].numNodes > 0); commStart += cd[cid].numNodes; } assert(commStart == G->numNodes); /** Collect the nodes of a cluster together **/ node_t* clusterednodes = (node_t*) malloc (G->numNodes * sizeof(node_t)); node_t* clusterPositions = (node_t*) malloc (G->numNodes * sizeof(node_t)); #pragma omp parallel for for(node_t c=0; c< noofComm;c++) { clusterPositions[cd[c].id] = c; } #ifdef DEBUG_ON #pragma omp parallel for for(node_t i =0;i< G->numNodes;i++) { clusterednodes[i] = NIL_NODE; } for(node_t c; c< noofComm; c++) { assert(clusterPositions[cd[c].id] == c); } #endif for(node_t i =0;i< G->numNodes;i++) { node_t nodeCluster = comm[i]; node_t clusterPos = clusterPositions[nodeCluster]; node_t nodePos = cd[clusterPos].commDistance + commpos[i]; #ifdef DEBUG_ON // printf("The node id %d community id %d (%d), community start is %d nodePos = %d clusterednode %d \n",i,comm[i], cd[clusterPos].id, cd[clusterPos].commDistance, nodePos, clusterednodes[nodePos]); assert(cd[clusterPos].id == comm[i]); assert(clusterednodes[nodePos] == NIL_NODE); #endif clusterednodes[nodePos] = i; } 
free(clusterPositions); #pragma omp parallel { int* visitedlist = (int*) malloc (cd[0].numNodes * sizeof(int)); node_t * commlist = (node_t*) malloc (cd[0].numNodes * sizeof(node_t)); node_t* stacklist = (node_t*) malloc (cd[0].numNodes * sizeof(node_t)); node_t commMax = cd[0].numNodes; #pragma omp for for(node_t c = 0; c< noofComm; c++) { node_t cid = commDetPointers[c]; node_t myCommunitySize = cd[cid].numNodes; node_t commId = cd[cid].id; node_t myStart = cd[cid].commDistance; if(myCommunitySize > commMax) { commlist = (node_t*) realloc(commlist, myCommunitySize * sizeof(node_t)); stacklist = (node_t*) realloc(stacklist, myCommunitySize * sizeof(node_t)); visitedlist = (int*) realloc(visitedlist , myCommunitySize * sizeof(int)); commMax = myCommunitySize; } for(node_t i = 0; i< myCommunitySize; i++) { commlist[i] = clusterednodes[i+ myStart]; #ifdef DEBUG_ON assert(comm[commlist[i]] == commId); #endif } /*************************** TODO *************************************/ if(myCommunitySize > 3) { memset(visitedlist, 0,myCommunitySize * sizeof(int)); // BFS topologicalSort(G, commlist, stacklist, visitedlist, comm, commpos, myCommunitySize, commId); /* TODO Do a BFS after reversing the list. 
*/ /* We might not require this exchange after the TODO */ node_t* temp = commlist; commlist = stacklist; stacklist = temp; } /* The order is set in commlist */ for(node_t i =0;i < myCommunitySize; i++){ assert(gm[myStart + i].revPos == NIL_NODE); assert(gm[commlist[i]].newPos == NIL_NODE); gm[myStart + i].revPos = commlist[i]; gm[commlist[i]].newPos = myStart + i; } } free(stacklist); free(commlist); free(visitedlist); } free(comm); free(commpos); free(cd); free(commDetPointers); free(clusterednodes); gettimeofday(&end, NULL); printf("Internal sorting the required time is %f \n",((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000)); gettimeofday(&start, NULL); graph* newG = createPreprocessedGraph(G, gm); gettimeofday(&end, NULL); printf("Copy back the required time is %f \n",((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000)); if(adj == 1) { /* Get the reverse edges */ createReverseEdges(newG); edge_t* r_begin = newG->r_begin; node_t* r_node_idx = newG->r_node_idx; free(newG->begin); free(newG->node_idx); newG->begin = r_begin; newG->node_idx = r_node_idx; } dumpmapping(G, gm, mapfile); /*now update edgeWeights in newG*/ if(G->weights != NULL) { int w = 0; bool found = false; edge_t pos; newG->weights = (int*) malloc(sizeof(int) * newG->numEdges); node_t x0; for (x0 = 0; x0 < G->numNodes; x0 ++) { for (edge_t y = G->begin[x0];y < G->begin[x0+1] ; y ++) { w = G->weights[y]; node_t d = G->node_idx[y]; node_t newS = gm[x0].newPos; node_t newD = gm[d].newPos; /* assert edge is added */ found = false; pos = NIL_EDGE; for(edge_t newY = newG->begin[newS]; newY < newG->begin[newS+1]; newY++) { if(newG->node_idx[newY] == newD) { found = true; pos = newY; break; } } assert(pos != NIL_EDGE); newG->weights[pos] = w; } } } free(gm); return newG; } double avgincident; double scaledT; double clusterCoeff; double aed; node_t dim; double sparsityMeasure; void writeSchema(const char *filename) { FILE *fp = fopen(filename, 
"w"); fprintf(fp, "Avg Adjecency = %f \n", avgincident); fprintf(fp, "Avg ClusterCoeff = %f \n",clusterCoeff); fprintf(fp, "Avg Edge Distance = %f \n", aed); fprintf(fp, "Sparsity = %0.7f \n",sparsityMeasure); fprintf(fp,"The Scaled percentange Triangles = %f\n\n", scaledT); fclose(fp); } /*** * Common entry point for all algorithms, **/ int runalgo(int argc,char** argv) { graph* G = readGraph(argv[1], argv[2]); adj = atoi(argv[6]); if(argc < 7) { const char* argList[6] = {" <inputfile> " , "graphformat.txt","<outputfile>", "<outputpropfile>", "<outputmapfilename>", "<adjecencyflag>[0/1] [no revrse/reverse]"}; printError(INCORRECT_ARG_LIST, 6, argList); return -1; } graph* newG = preprocess(G, argv[5]); avgincident = ((double) G->numEdges )/ G->numNodes; writeBackGraph(newG, argv[3]); clusterCoeff = avgClusterCoeff(G); aed = avgEdgeDistance(G); dim = diameter(G); sparsityMeasure = sparsity(G); double T = triangle_counting(G); avgClusterCoeff(newG); avgEdgeDistance(newG); diameter(newG); sparsity(newG); triangle_counting(newG); scaledT = ((double) T )/ G->numNodes; sccIndex(newG); writeSchema(argv[4]); return 0; } inline void kernel(graph *G) { }
eigen.c
// Modified from the band package. Copyright (c) 2016 Drew Schmidt #include "safeomp.h" #include <float/slapack.h> #include "Rfloat.h" #include "unroll.h" static inline void reverse_vec(const float_len_t len, float *const x) { float_len_t j = len-1; for (float_len_t i=0; i<len/2; i++) { const float tmp = x[i]; x[i] = x[j]; x[j] = tmp; j--; } } // reverse columns of a column-major matrix static inline void reverse_mat(const float_len_t m, const float_len_t n, float *const x) { float_len_t last = n - 1; for (float_len_t j=0; j<n/2; j++) { #pragma omp parallel for if(m>OMP_MIN_SIZE) for (float_len_t i=0; i<m; i++) { const float tmp = x[i + m*j]; x[i + m*j] = x[i + m*last]; x[i + m*last] = tmp; } last--; } } static inline int eig_sym_rrr(const bool inplace, const bool only_values, const int n, float *restrict x, float *restrict values, float *restrict vectors) { int jobz, info; float *x_cp; float worksize; int lwork, liwork; int *iwork, *vec_support; float *work; int nfound; if (!inplace) { x_cp = malloc(n*n * sizeof(*x_cp)); if (x_cp == NULL) { info = BADMALLOC; goto cleanup; } memcpy(x_cp, x, n*n*sizeof(float)); } else x_cp = x; if (only_values) jobz = JOBZ_N; else jobz = JOBZ_V; vec_support = malloc(2*n * sizeof(*vec_support)); if (vec_support == NULL) { info = BADMALLOC; goto cleanup; } F77_CALL(rsyevr)(&jobz, &(int){RANGE_A}, &(int){UPLO_U}, &n, x_cp, &n, &(float){0.0}, &(float){0.0}, &(int){0}, &(int){0}, &(float){0.0}, &nfound, values, vectors, &n, vec_support, &worksize, &(int){-1}, &liwork, &(int){-1}, &info); lwork = (int) worksize; work = malloc(lwork * sizeof(*work)); if (work == NULL) { free(vec_support); info = BADMALLOC; goto cleanup; } iwork = malloc(liwork * sizeof(*iwork)); if (iwork == NULL) { free(vec_support);free(work); info = BADMALLOC; goto cleanup; } F77_CALL(rsyevr)(&jobz, &(int){RANGE_A}, &(int){UPLO_U}, &n, x_cp, &n, &(float){0.0}, &(float){0.0}, &(int){0}, &(int){0}, &(float){0.0}, &nfound, values, vectors, &n, vec_support, work, &lwork, 
iwork, &liwork, &info); free(vec_support); free(work); free(iwork); cleanup: if (!inplace) free(x_cp); if (info == BADMALLOC) // jobz is safe, so checking against -1 necessarily means OOM THROW_MEMERR; return info; } SEXP R_symeig_spm(SEXP x, SEXP onlyvals_, SEXP descending) { SEXP ret, ret_names; SEXP values, vectors; float *vectors_pass; int ptct; int info; const float_len_t m = NROWS(x); const float_len_t n = NCOLS(x); if (m != n) error("non-square matrix in 'eigen'\n"); const int onlyvals = INT(onlyvals_); if (onlyvals) { vectors = R_NilValue; vectors_pass = NULL; ptct = 3; } else { PROTECT(vectors = newmat(n, n)); vectors_pass = DATA(vectors); ptct = 4; } PROTECT(values = newvec(n)); info = eig_sym_rrr(false, onlyvals, n, DATA(x), DATA(values), vectors_pass); if (info != 0) error("ssyevd() returned info=%d\n", info); if (INT(descending)) { reverse_vec(n, DATA(values)); if (!onlyvals) reverse_mat(n, n, DATA(vectors)); } PROTECT(ret = allocVector(VECSXP, 2)); PROTECT(ret_names = allocVector(STRSXP, 2)); SET_VECTOR_ELT(ret, 0, values); SET_VECTOR_ELT(ret, 1, vectors); SET_STRING_ELT(ret_names, 0, mkChar("values")); SET_STRING_ELT(ret_names, 1, mkChar("vectors")); setAttrib(ret, R_NamesSymbol, ret_names); UNPROTECT(ptct); return ret; }
helloworld.c
#include <stdio.h>
#include <omp.h>

/*
 * Offload a small region to the default OpenMP target device and report
 * whether it actually executed on the host. The exit status mirrors the
 * answer (1 = host, 0 = device).
 */
int main(void)
{
  int isHost = 1;

#pragma omp target map(tofrom: isHost)
  {
    isHost = omp_is_initial_device();
    printf("Hello world. %d\n", 100);
    for (int iter = 0; iter < 5; ++iter)
      printf("Hello world. iteration %d\n", iter);
  }

  const char *where = isHost ? "host" : "device";
  printf("Target region executed on the %s\n", where);

  return isHost;
}
GB_is_diagonal.c
//------------------------------------------------------------------------------ // GB_is_diagonal: check if A is a diagonal matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Returns true if A is a square diagonal matrix, with all diagonal entries // present. All pending tuples are ignored. Zombies are treated as entries. #include "GB_mxm.h" bool GB_is_diagonal // true if A is diagonal ( const GrB_Matrix A, // input matrix to examine GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (A != NULL) ; ASSERT_OK (GB_check (A, "A check diag", GB0)) ; //-------------------------------------------------------------------------- // trivial cases //-------------------------------------------------------------------------- int64_t n = GB_NROWS (A) ; int64_t ncols = GB_NCOLS (A) ; if (n != ncols) { // A is rectangular return (false) ; } int64_t anz = GB_NNZ (A) ; int64_t nvec = A->nvec ; if (n != anz || n != nvec) { // A must have exactly n entries in n vectors. A can be sparse or // hypersparse. If hypersparse, all vectors must be present, so // Ap has size n+1 whether sparse or hypersparse. return (false) ; } //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- // Break the work into lots of tasks so the early-exit can be exploited. GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (n, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 
1 : (256 * nthreads) ; ntasks = GB_IMIN (ntasks, n) ; ntasks = GB_IMAX (ntasks, 1) ; //-------------------------------------------------------------------------- // examine each vector of A //-------------------------------------------------------------------------- const int64_t *restrict Ap = A->p ; const int64_t *restrict Ai = A->i ; int diagonal = true ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (int tid = 0 ; tid < ntasks ; tid++) { //---------------------------------------------------------------------- // check for early exit //---------------------------------------------------------------------- int diag = true ; { #pragma omp atomic read diag = diagonal ; } if (!diag) continue ; //---------------------------------------------------------------------- // check if vectors jstart:jend-1 are diagonal //---------------------------------------------------------------------- int64_t jstart, jend ; GB_PARTITION (jstart, jend, n, tid, ntasks) ; for (int64_t j = jstart ; diag && j < jend ; j++) { int64_t p = Ap [j] ; int64_t ajnz = Ap [j+1] - p ; if (ajnz != 1) { // A(:,j) must have exactly one entry diag = false ; } int64_t i = Ai [p] ; if (i != j) { // the single entry must be A(i,i) diag = false ; } } //---------------------------------------------------------------------- // early exit: tell all other tasks to halt //---------------------------------------------------------------------- if (!diag) { #pragma omp atomic write diagonal = false ; } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- if (diagonal) A->nvec_nonempty = n ; return ((bool) diagonal) ; }
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/cuda/vector_cudahost.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/threading.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include "score_updater.hpp" namespace LightGBM { using json11::Json; /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. 
Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! 
* \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequence of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! 
* \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param start_iteration Start index of the iteration to predict * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_pred_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); start_iteration = std::max(start_iteration, 0); start_iteration = std::min(start_iteration, max_iteration); if (num_iteration > 0) { num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration)); } else { num_pred_in_one_row *= (max_iteration - start_iteration); } } else if (is_pred_contrib) { num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_pred_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output) const override; void PredictContribByMap(const std::unordered_map<int, double>& features, std::vector<std::unordered_map<int, double>>* output) const override; /*! 
* \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration, int feature_importance_type) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, int feature_importance_type, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Calculate upper bound value * \return upper bound value */ double GetUpperBoundValue() const override; /*! * \brief Calculate lower bound value * \return lower bound value */ double GetLowerBoundValue() const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! 
* \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iteration = std::max(start_iteration, 0); start_iteration = std::min(start_iteration, num_iteration_for_pred_); if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration); } else { num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration; } start_iteration_for_pred_ = start_iteration; if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ const char* SubModelName() const override { return "tree"; } bool IsLinear() const override { return linear_tree_; } protected: virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) { if (objective_function != nullptr) { return objective_function->IsConstantHessian(); } else { return false; } } /*! * \brief Print eval result and check early stopping */ virtual bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! 
* \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current interation * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Only use first metric for early stopping */ bool es_first_metric_only_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! 
\brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; #ifdef USE_CUDA /*! \brief First order derivative of training data */ std::vector<score_t, CHAllocator<score_t>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, CHAllocator<score_t>> hessians_; #else /*! \brief First order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_; #endif /*! \brief Store the indices of in-bag data */ std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Start iteration of used model */ int start_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! 
\brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; const int bagging_rand_block_ = 1024; std::vector<Random> bagging_rands_; ParallelPartitionRunner<data_size_t, false> bagging_runner_; Json forced_splits_json_; bool linear_tree_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
tls-2.c
/* { dg-do compile } */
/* { dg-require-effective-target tls } */

/* Negative test for '#pragma omp threadprivate': the directive must be
   rejected for variables that cannot be given thread-local storage.
   Each dg-error comment pins the diagnostic the compiler must emit.  */

/* 'buf' has incomplete array type (unknown size), so it cannot be made
   threadprivate.  */
extern char buf[];
#pragma omp threadprivate (buf) /* { dg-error "has incomplete type" } */

void foo (void)
{
  /* 'i' has automatic storage duration; threadprivate requires static
     storage (file scope or a 'static' local).  */
  int i;
#pragma omp threadprivate (i) /* { dg-error "automatic variable" } */
  i = 0;
}
ddcMalloc.c
// $Id$ #define _XOPEN_SOURCE 600 #include "ddcMalloc.h" #include "mpiUtils.h" #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <libgen.h> #include <string.h> #ifndef __APPLE__ #include <malloc.h> #endif #ifdef WITH_PIO #include "pio.h" #endif static int addBlock(void* ptr, size_t size, char* location); static int updateBlock(void* old_ptr, void* new_ptr, size_t size, char* location); static int freeBlock(void* ptr); static int findBlock(void* ptr); static void printHeapInfo(FILE* file); #ifdef WITH_PIO static void printHeapInfo_pio(PFILE* file); #endif static int _verboseTask = -1; typedef struct memBlock_st { size_t size; void* ptr; char location[40]; } MEMBLOCK; #define MAX_BLOCK 64000 static MEMBLOCK _block[MAX_BLOCK]; static int _freeBlock[MAX_BLOCK]; static int _nextBlock = MAX_BLOCK +1; static size_t _memUsed = 0; static size_t _peakUsed = 0; static int _blocksUsed = 0; const double b2mb=1024*1024; void ddcMemSetVerbose(int task) { _verboseTask = task; } void ddcMemInit(void) { for (unsigned ii=0; ii<MAX_BLOCK; ++ii) { _freeBlock[ii] = ii; _block[ii].size = 0; _block[ii].ptr = NULL; _block[ii].location[0] = '\0'; } _nextBlock = 0; } void ddcMemSummary(FILE* file) { fprintf(file, "ddcMem task %d: peak=%7.2fMB current=%7.2fMB in %d blocks\n", getRank(0), _peakUsed/b2mb, _memUsed/b2mb, _blocksUsed); } #ifdef WITH_PIO void ddcMemSummary_pio(PFILE* file) { Pprintf(file, "ddcMem task %d: peak=%7.2fMB current=%7.2fMB in %d blocks\n", getRank(0), _peakUsed/b2mb, _memUsed/b2mb, _blocksUsed); } #endif void ddcMemReport(FILE* file) { size_t totalSize = 0; fprintf(file, "ddcMem report for task %d\n\n", getRank(0)); fprintf(file, "Block ptr size location\n"); fprintf(file, "=======================================================================\n"); for (unsigned ii=0; ii<MAX_BLOCK; ++ii) { if (_block[ii].ptr == NULL) continue; fprintf(file, "%5d: %10p %12zuk %s\n", ii, _block[ii].ptr, _block[ii].size/1024, _block[ii].location); totalSize += 
_block[ii].size; } fprintf(file, "\nTotal size = %f MB\n", totalSize/b2mb); fprintf(file, "Peak size = %f MB\n\n", _peakUsed/b2mb); printHeapInfo(file); } #ifdef WITH_PIO void ddcMemReport_pio(PFILE* file) { size_t totalSize = 0; Pprintf(file, "ddcMem report for task %d\n\n", getRank(0)); Pprintf(file, "Block ptr size location\n"); Pprintf(file, "=======================================================================\n"); for (unsigned ii=0; ii<MAX_BLOCK; ++ii) { if (_block[ii].ptr == NULL) continue; Pprintf(file, "%5d: 0x%08x %12ik %s\n", ii, _block[ii].ptr, _block[ii].size/1024, _block[ii].location); totalSize += _block[ii].size; } Pprintf(file, "\nTotal size = %f MB\n", totalSize/b2mb); Pprintf(file, "Peak size = %f MB\n\n", _peakUsed/b2mb); printHeapInfo_pio(file); } #endif /** Implementation Note: Some implementations of malloc (such a purple) * return a null pointer when called with a size of zero. This is * allowed by the POSIX standard. It is entirely likely that we will * call malloc with zero size since some tasks may logically have zero * of some items such as fftChannels. * * We don't want malloc to return NULL for two reasons: First, we want * to use a NULL return as a sign that malloc has failed. Second, we * would like a unique pointer for each call to malloc so that we can * add it to the block table. If two entries in the block table have * the same pointer they become indistinguishable when we try to free * the pointer (since only the pointer is passed to ddcFree). * * To work around this problem we check for a zero size and always * allocate at least sizeof(void*). This way a NULL return is a true * error and we get a unique pointer value to add to the block table. 
*/ void* _ddcMalloc(size_t size, char* location) { if (size == 0) size = sizeof(void*); void* ptr = malloc(size); if (!ptr) { printf("mem: ddcMalloc failed on task %d (%zu bytes at %s)\n" " memUsed=%8.2f\n", getRank(0), size, location, _memUsed/b2mb); printHeapInfo(stdout); } else { int b = addBlock(ptr, size, location); if (_verboseTask == getRank(0)) { printf("mem: task %d block %d malloc %10p (%zu bytes at %s) total %8.2f\n", getRank(0), b, ptr, size, location, _memUsed/b2mb); printHeapInfo(stdout); } } return ptr; } /** For the same reasons as explained above _ddcMalloc we check for zero * size allocations and ensure at least a little memory is allocated. */ void* _ddcCalloc(size_t count, size_t size, char* location) { if (count == 0) count = 1; if (size == 0) size = sizeof(void*); void* ptr = calloc(count, size); if (!ptr) { printf("mem: ddcCalloc failed on task %d (%zu bytes at %s\n" " memUsed=%8.2f\n", getRank(0), size*count, location, _memUsed/b2mb); printHeapInfo(stdout); } else { int b = addBlock(ptr, size*count, location); if (_verboseTask == getRank(0)) printf("mem: task %d block %d calloc %10p (%zu bytes at %s) total %8.2f\n", getRank(0), b, ptr, size*count, location, _memUsed/b2mb); } return ptr; } /** For the same reasons as explained above _ddcMalloc we check for zero * size allocations and ensure at least a little memory is allocated. 
* By POSIX, if size is zero and ptr is non-null the object is freed.*/ void* _ddcRealloc(void* ptr, size_t size, char* location) { if (size == 0 && ptr == NULL) size = sizeof(void*); if (size == 0 && ptr != NULL) { _ddcFree(ptr, location); return NULL; } void* old_ptr = ptr; void* new_ptr = realloc(ptr, size); if (!new_ptr) { printf("mem: ddcRealloc failed on task %d (%zu bytes at %s)\n" " ptr=%10p\n" " memUsed=%8.2f\n", getRank(0), size, location, ptr, _memUsed/b2mb); printHeapInfo(stdout); } else { int b = updateBlock(old_ptr, new_ptr, size, location); if (_verboseTask == getRank(0)) printf("mem: task %d block %d realloc %10p (%zu bytes at %s) total %8.2f\n", getRank(0), b, ptr, size, location, _memUsed/b2mb); } return new_ptr; } /** For the same reasons as explained above _ddcMalloc we check for zero * size allocations and ensure at least a little memory is allocated. */ int _ddcMallocAligned(void** ptr, size_t alignment, size_t size, char* location) { if (size == 0) size = sizeof(void*); int retVal = _mallocAligned(ptr, alignment, size); if (!*ptr) { printf("mem: ddcMallocAligned failed on task %d (%zu bytes at %s)\n" " memUsed=%8.2f\n", getRank(0), size, location, _memUsed/b2mb); printHeapInfo(stdout); } else { int b = addBlock(ptr, size, location); if (_verboseTask == getRank(0)) { printf("mem: task %d block %d mallocAligned %10p (%zu bytes at %s) total %8.2f\n", getRank(0), b, ptr, size, location, _memUsed/b2mb); printHeapInfo(stdout); } } return retVal; } void _ddcFree(void* ptr, const char* location) { free(ptr); int b = freeBlock(ptr); if (_verboseTask == getRank(0)) printf("mem: task %d block %d free %10p at %s total %8.2f\n", getRank(0), b, ptr, location, _memUsed/b2mb); } char* _ddcLine(const char* file, int lineNum) { static char buffer[256]; sprintf(buffer, "%s:%d", file, lineNum); return buffer; } int addBlock(void* ptr, size_t size, char* location) { assert(ptr != NULL); int here; #pragma omp critical (ddcMalloc_addBlock) { if (_nextBlock == 
MAX_BLOCK+1) ddcMemInit(); here = _freeBlock[_nextBlock]; _block[here].ptr = ptr; _block[here].size = size; _block[here].location[0] = '\0'; strncat(_block[here].location, basename(location),39); ++_blocksUsed; _memUsed += size; if (_memUsed > _peakUsed) _peakUsed = _memUsed; ++_nextBlock; if (_nextBlock == MAX_BLOCK) { printf("Block storage exhausted on task %d in addBlock.\n" "%s\n" "Try increasing MAX_BLOCK\n", getRank(0),location); exit(3); } } return here; } int updateBlock(void* old_ptr, void* new_ptr, size_t size, char* location) { if (old_ptr == NULL) return addBlock(new_ptr, size, location); int here = findBlock(old_ptr); if (here < 0) { /* printf("Error in updateBlock on task %d\n" */ /* " old_ptr=%08x not found in block table.\n" */ /* " new_ptr=%08x\n" */ /* " %d bytes at %s\n", */ /* getRank(0), old_ptr, new_ptr, size, location); */ return addBlock(new_ptr, size, location); } #pragma omp critical (ddcMalloc_updateBlock) { _memUsed += (size - _block[here].size); if (_memUsed > _peakUsed) _peakUsed = _memUsed; _block[here].ptr = new_ptr; _block[here].size = size; _block[here].location[0] = '\0'; strncat(_block[here].location, basename(location),39); } return here; } int freeBlock(void* ptr) { if (ptr == NULL) return -2; int here = findBlock(ptr); #pragma omp critical (ddcMalloc_freeBlock) { if (here >= 0) { --_blocksUsed; _memUsed -= _block[here].size; _block[here].ptr = NULL; _block[here].size = 0; _block[here].location[0] = '\0'; --_nextBlock; assert (_nextBlock >= 0); _freeBlock[_nextBlock] = here; } /* else */ /* printf("mem: Error on Task %d. Request to free ptr 0x%08x.\n" */ /* " Pointer cannot be found in block list.\n", */ /* getRank(0), ptr); */ } return here; } int findBlock(void*ptr) { for (unsigned ii=0; ii<MAX_BLOCK; ++ii) if (_block[ii].ptr == ptr) return ii; return -1; } void printHeapInfo(FILE* file) { #ifdef __APPLE__ fprintf(file, "In routine printHeapInfo no mallinfo on OS X. 
Sorry.\n"); #else struct mallinfo minfo; minfo = mallinfo(); fprintf(file, "mem: task %d system heap=%fMB used=%fMB unused=%fMB\n", getRank(0), minfo.arena/(1024*1024.), minfo.uordblks/(1024*1024.), minfo.fordblks/(1024*1024.)); #endif } #ifdef WITH_PIO void printHeapInfo_pio(PFILE* file) { #ifdef __APPLE__ Pprintf(file, "No mallinfo on OS X. Sorry.\n"); #else struct mallinfo minfo; minfo = mallinfo(); Pprintf(file, "mem: task %d system heap=%fMB used=%fMB unused=%fMB\n", getRank(0), minfo.arena/(1024*1024.), minfo.uordblks/(1024*1024.), minfo.fordblks/(1024*1024.)); #endif } #endif // ifdef WITH_PIO int _mallocAligned(void** ptr, size_t alignment, size_t size) { #ifndef __APPLE__ return posix_memalign(ptr, alignment, size); #else *ptr = malloc(size); if (*ptr == NULL) return 1; return 0; #endif } void freeNull(void **ptr) { if ( *ptr != NULL ) { free(*ptr); *ptr = NULL; } }
tree.h
#ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/meta.h> #include <LightGBM/dataset.h> #include <string> #include <vector> #include <memory> #include <map> namespace LightGBM { #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves */ explicit Tree(int max_leaves); /*! * \brief Construtor, from a string * \param str Model string * \param used_len used count of str */ Tree(const char* str, size_t* used_len); ~Tree(); /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, int left_cnt, int right_cnt, float gain, MissingType missing_type, bool default_left); /*! 
* \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, int left_cnt, int right_cnt, float gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = output; } /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! * \brief Adding prediction value of this tree model to scorese * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! 
* \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! * \brief Shrinkage for the tree's output * shrinkage rate (a.k.a learning rate) is used to tune the traning process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] *= rate; } shrinkage_ *= rate; } inline double shrinkage() const { return shrinkage_; } inline void AddBias(double val) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] = val + leaf_value_[i]; } // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! 
\brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool predict_leaf_index) const; inline static bool IsZero(double fval) { if (fval > -kZeroThreshold && fval <= kZeroThreshold) { return true; } else { return false; } } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } void RecomputeMaxDepth(); private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval)) { if (missing_type != 2) { fval = 0.0f; } } if ((missing_type == 1 && IsZero(fval)) || (missing_type == 2 && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == 1 && fval == default_bin) || (missing_type == 2 && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_in_bin_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = 
GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node];; } else if (std::isnan(fval)) { // NaN is always in the right if (missing_type == 2) { return right_child_[node]; } int_fval = 0; } int cat_idx = int(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = int(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, float gain); /*! * \brief Find leaf index of which record belongs by features * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const; /*! \brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! 
\brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool predict_leaf_index) const; std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const; double ExpectedValue() const; /*! \brief This is used fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permuation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (https://arxiv.org/abs/1706.06060)*/ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*! \brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! determine what the total permuation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current levas*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! 
\brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and mising value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<float> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief DataCount of leaves */ std::vector<int> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief DataCount of non-leaf nodes */ std::vector<int> internal_count_; /*! 
\brief Depth for leaves */ std::vector<int> leaf_depth_; double shrinkage_; int max_depth_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, float gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = Common::AvoidInf(gain); // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ? 
0.0f : right_value; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK(max_depth_ >= 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth + 1); RecomputeLeafDepths(right_child_[node], depth + 1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } inline int Tree::GetLeafByMap(const 
std::unordered_map<int, double>& feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } else { while (node >= 0) { node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
threading.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for * license information. */ #ifndef LIGHTGBM_UTILS_THREADING_H_ #define LIGHTGBM_UTILS_THREADING_H_ #include <LightGBM/meta.h> #include <LightGBM/utils/common.h> #include <LightGBM/utils/openmp_wrapper.h> #include <algorithm> #include <functional> #include <vector> namespace LightGBM { class Threading { public: template <typename INDEX_T> static inline void BlockInfo(INDEX_T cnt, INDEX_T min_cnt_per_block, int* out_nblock, INDEX_T* block_size) { int num_threads = OMP_NUM_THREADS(); BlockInfo<INDEX_T>(num_threads, cnt, min_cnt_per_block, out_nblock, block_size); } template <typename INDEX_T> static inline void BlockInfo(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, int* out_nblock, INDEX_T* block_size) { *out_nblock = std::min<int>( num_threads, static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); if (*out_nblock > 1) { *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock)); } else { *block_size = cnt; } } template <typename INDEX_T> static inline void BlockInfoForceSize(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, int* out_nblock, INDEX_T* block_size) { *out_nblock = std::min<int>( num_threads, static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); if (*out_nblock > 1) { *block_size = (cnt + (*out_nblock) - 1) / (*out_nblock); // force the block size to the times of min_cnt_per_block *block_size = (*block_size + min_cnt_per_block - 1) / min_cnt_per_block * min_cnt_per_block; } else { *block_size = cnt; } } template <typename INDEX_T> static inline int For( INDEX_T start, INDEX_T end, INDEX_T min_block_size, const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) { int n_block = 1; INDEX_T num_inner = end - start; BlockInfo<INDEX_T>(end - start, min_block_size, &n_block, &num_inner); OMP_INIT_EX(); #pragma omp parallel for schedule(static, 1) for (int i 
= 0; i < n_block; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T inner_start = start + num_inner * i; INDEX_T inner_end = std::min(end, inner_start + num_inner); inner_fun(i, inner_start, inner_end); OMP_LOOP_EX_END(); } OMP_THROW_EX(); return n_block; } }; template <typename INDEX_T, bool TWO_BUFFER> class ParallelPartitionRunner { public: ParallelPartitionRunner(INDEX_T num_data, INDEX_T min_block_size) : min_block_size_(min_block_size) { num_threads_ = OMP_NUM_THREADS(); left_.resize(num_data); if (TWO_BUFFER) { right_.resize(num_data); } offsets_.resize(num_threads_); left_cnts_.resize(num_threads_); right_cnts_.resize(num_threads_); left_write_pos_.resize(num_threads_); right_write_pos_.resize(num_threads_); } ~ParallelPartitionRunner() {} void ReSize(INDEX_T num_data) { left_.resize(num_data); if (TWO_BUFFER) { right_.resize(num_data); } } template<bool FORCE_SIZE> INDEX_T Run( INDEX_T cnt, const std::function<INDEX_T(int, INDEX_T, INDEX_T, INDEX_T*, INDEX_T*)>& func, INDEX_T* out) { int nblock = 1; INDEX_T inner_size = cnt; if (FORCE_SIZE) { Threading::BlockInfoForceSize<INDEX_T>(num_threads_, cnt, min_block_size_, &nblock, &inner_size); } else { Threading::BlockInfo<INDEX_T>(num_threads_, cnt, min_block_size_, &nblock, &inner_size); } OMP_INIT_EX(); #pragma omp parallel for schedule(static, 1) num_threads(num_threads_) for (int i = 0; i < nblock; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T cur_start = i * inner_size; INDEX_T cur_cnt = std::min(inner_size, cnt - cur_start); offsets_[i] = cur_start; if (cur_cnt <= 0) { left_cnts_[i] = 0; right_cnts_[i] = 0; continue; } auto left_ptr = left_.data() + cur_start; INDEX_T* right_ptr = nullptr; if (TWO_BUFFER) { right_ptr = right_.data() + cur_start; } // split data inner, reduce the times of function called INDEX_T cur_left_count = func(i, cur_start, cur_cnt, left_ptr, right_ptr); if (!TWO_BUFFER) { // reverse for one buffer std::reverse(left_ptr + cur_left_count, left_ptr + cur_cnt); } left_cnts_[i] = cur_left_count; 
right_cnts_[i] = cur_cnt - cur_left_count; OMP_LOOP_EX_END(); } OMP_THROW_EX(); left_write_pos_[0] = 0; right_write_pos_[0] = 0; for (int i = 1; i < nblock; ++i) { left_write_pos_[i] = left_write_pos_[i - 1] + left_cnts_[i - 1]; right_write_pos_[i] = right_write_pos_[i - 1] + right_cnts_[i - 1]; } data_size_t left_cnt = left_write_pos_[nblock - 1] + left_cnts_[nblock - 1]; auto right_start = out + left_cnt; #pragma omp parallel for schedule(static, 1) num_threads(num_threads_) for (int i = 0; i < nblock; ++i) { std::copy_n(left_.data() + offsets_[i], left_cnts_[i], out + left_write_pos_[i]); if (TWO_BUFFER) { std::copy_n(right_.data() + offsets_[i], right_cnts_[i], right_start + right_write_pos_[i]); } else { std::copy_n(left_.data() + offsets_[i] + left_cnts_[i], right_cnts_[i], right_start + right_write_pos_[i]); } } return left_cnt; } private: int num_threads_; INDEX_T min_block_size_; std::vector<INDEX_T> left_; std::vector<INDEX_T> right_; std::vector<INDEX_T> offsets_; std::vector<INDEX_T> left_cnts_; std::vector<INDEX_T> right_cnts_; std::vector<INDEX_T> left_write_pos_; std::vector<INDEX_T> right_write_pos_; }; } // namespace LightGBM #endif // LightGBM_UTILS_THREADING_H_
CutPursuit_L2.h
#pragma once #include "CutPursuit.h" #include "Common.h" namespace CP { template <typename T> class CutPursuit_L2 : public CutPursuit<T> { public: ~CutPursuit_L2(){ }; //============================================================================================= //============================= COMPUTE ENERGY =========================================== //============================================================================================= virtual std::pair<T,T> compute_energy() override { VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); EdgeAttributeMap<T> edge_attribute_map = boost::get(boost::edge_bundle, this->main_graph); //the first element pair_energy of is the fidelity and the second the penalty std::pair<T,T> pair_energy; T energy = 0; //#pragma omp parallel for private(i_dim) if (this->parameter.parallel) schedule(static) reduction(+:energy,i) for (uint32_t ind_ver = 0; ind_ver < this->nVertex; ind_ver++) { VertexDescriptor<T> i_ver = boost::vertex(ind_ver, this->main_graph); for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { energy += .5*vertex_attribute_map(i_ver).weight * pow(vertex_attribute_map(i_ver).observation[i_dim] - vertex_attribute_map(i_ver).value[i_dim],2); } } pair_energy.first = energy; energy = 0; EdgeIterator<T> i_edg, i_edg_end = boost::edges(this->main_graph).second; for (i_edg = boost::edges(this->main_graph).first; i_edg != i_edg_end; ++i_edg) { if (!edge_attribute_map(*i_edg).realEdge) { continue; } energy += .5 * edge_attribute_map(*i_edg).isActive * this->parameter.reg_strenth * edge_attribute_map(*i_edg).weight; } pair_energy.second = energy; return pair_energy; } //============================================================================================= //============================= SPLIT =========================================== //============================================================================================= virtual uint32_t split() override { // split 
the graph by trying to find the best binary partition // each components is split into B and notB // for each components we associate the value h_1 and h_2 to vertices in B or notB // the affectation as well as h_1 and h_2 are computed alternatively //tic(); //--------loading structures--------------------------------------------------------------- uint32_t nb_comp = this->components.size(); VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph); uint32_t saturation; //stores wether each vertex is B or not std::vector<bool> binary_label(this->nVertex); //initialize the binary partition with kmeans this->init_labels(binary_label); //centers is the value of each binary component in the optimal partition VectorOfCentroids<T> centers(nb_comp, this->dim); //-----main loop---------------------------------------------------------------- // the optimal flow is iteratively approximated for (uint32_t i_step = 1; i_step <= this->parameter.flow_steps; i_step++) { //the regularization strength at this step //compute h_1 and h_2 centers = VectorOfCentroids<T>(nb_comp, this->dim); this->compute_centers(centers, nb_comp,binary_label); this->set_capacities(centers); // update the capacities of the flow graph boost::boykov_kolmogorov_max_flow( this->main_graph, get(&EdgeAttribute<T>::capacity , this->main_graph), get(&EdgeAttribute<T>::residualCapacity, this->main_graph), get(&EdgeAttribute<T>::edge_reverse , this->main_graph), get(&VertexAttribute<T>::color , this->main_graph), get(boost::vertex_index , this->main_graph), this->source, this->sink); for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++) { if (this->saturated_components[ind_com]) { continue; } for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++) { binary_label[vertex_index_map(this->components[ind_com][i_ver])] = 
(vertex_attribute_map(this->components[ind_com][i_ver]).color == vertex_attribute_map(this->sink).color); } } } saturation = this->activate_edges(); return saturation; } //============================================================================================= //============================= INIT_L2 ====== =========================================== //============================================================================================= inline void init_labels(std::vector<bool> & binary_label) { //-----initialize the labelling for each components with kmeans------------------------------ VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph); uint32_t nb_comp = this->components.size(); // ind_com; //#pragma omp parallel for private(ind_com) //if (nb_comp>=8) schedule(dynamic) #ifdef OPENMP #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic) #endif for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++) { std::vector< std::vector<T> > kernels(2, std::vector<T>(this->dim)); T total_weight[2]; T best_energy; T current_energy; uint32_t comp_size = this->components[ind_com].size(); std::vector<bool> potential_label(comp_size); std::vector<T> energy_array(comp_size); if (this->saturated_components[ind_com] || comp_size <= 1) { continue; } for (uint32_t init_kmeans = 0; init_kmeans < this->parameter.kmeans_resampling; init_kmeans++) {//proceed to several initilialisation of kmeans and pick up the best one //----- initialization with KM++ ------------------ uint32_t first_kernel = std::rand() % comp_size, second_kernel = 0; // first kernel attributed for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[0][i_dim] = vertex_attribute_map(this->components[ind_com][first_kernel ]).observation[i_dim]; } best_energy = 0; //now compute the square distance of each pouint32_tto this kernel #ifdef OPENMP 
#pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(best_energy) schedule(static) #endif for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { energy_array[i_ver] = 0; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { energy_array[i_ver] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] - kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } best_energy += energy_array[i_ver]; } // we now generate a random number to determinate which node will be the second kernel T random_sample = ((T)(rand())) / ((T)(RAND_MAX)); current_energy = best_energy * random_sample; for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { current_energy -= energy_array[i_ver]; if (current_energy < 0) { //we have selected the second kernel second_kernel = i_ver; break; } } for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { // now fill the second kernel kernels[1][i_dim] = vertex_attribute_map(this->components[ind_com][second_kernel]).observation[i_dim]; } //----main kmeans loop----- for (uint32_t ite_kmeans = 0; ite_kmeans < this->parameter.kmeans_ite; ite_kmeans++) { //--affectation step: associate each node with its closest kernel------------------- #ifdef OPENMP #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static) #endif for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { std::vector<T> distance_kernels(2); for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { distance_kernels[0] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] - kernels[0][i_dim],2); distance_kernels[1] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] - kernels[1][i_dim],2); } potential_label[i_ver] = distance_kernels[0] > distance_kernels[1]; } //-----computation of the new kernels---------------------------- total_weight[0] = 0.; total_weight[1] = 0.; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[0][i_dim] = 
0; kernels[1][i_dim] = 0; } #ifdef OPENMP #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static) #endif for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0) { continue; } if (potential_label[i_ver]) { total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][i_ver]).weight ; } } else { total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } } } if ((total_weight[0] == 0)||(total_weight[1] == 0)) { //std::cout << "kmeans error : " << comp_size << std::endl; break; } for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[0][i_dim] = kernels[0][i_dim] / total_weight[0]; kernels[1][i_dim] = kernels[1][i_dim] / total_weight[1]; } } //----compute the associated energy ------ current_energy = 0; #ifdef OPENMP #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static) #endif for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { if (potential_label[i_ver]) { current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] - kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } else { current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] - kernels[1][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } } } if (current_energy < best_energy) { best_energy = current_energy; for 
(uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { binary_label[vertex_index_map(this->components[ind_com][i_ver])] = potential_label[i_ver]; } } } } } //============================================================================================= //============================= COMPUTE_CENTERS_L2 ========================================== //============================================================================================= inline void compute_centers(VectorOfCentroids<T> & centers, const uint32_t & nb_comp , const std::vector<bool> & binary_label) { //compute for each component the values of h_1 and h_2 #ifdef OPENMP #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic) #endif for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++) { if (this->saturated_components[ind_com]) { continue; } compute_center(centers.centroids[ind_com], ind_com, binary_label); } return; } //============================================================================================= //============================= COMPUTE_CENTERS_L2 ========================================== //============================================================================================= inline void compute_center( std::vector< std::vector<T> > & center, const uint32_t & ind_com , const std::vector<bool> & binary_label) { //compute for each component the values of the centroids corresponding to the optimal binary partition VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph); T total_weight[2]; total_weight[0] = 0.; total_weight[1] = 0.; //#pragma omp parallel for if (this->parameter.parallel) for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++) { if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0) { continue; } if (binary_label[vertex_index_map(this->components[ind_com][i_ver])]) { total_weight[0] 
+= vertex_attribute_map(this->components[ind_com][i_ver]).weight; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { center[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][i_ver]).weight ; } } else { total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { center[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } } } if ((total_weight[0] == 0)||(total_weight[1] == 0)) { //the component is saturated this->saturateComponent(ind_com); for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { center[0][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim]; center[1][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim]; } } else { for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { center[0][i_dim] = center[0][i_dim] / total_weight[0]; center[1][i_dim] = center[1][i_dim] / total_weight[1]; } } return; } //============================================================================================= //============================= SET_CAPACITIES ========================================== //============================================================================================= inline void set_capacities(const VectorOfCentroids<T> & centers) { VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); EdgeAttributeMap<T> edge_attribute_map = boost::get(boost::edge_bundle, this->main_graph); //----first compute the capacity in sink/node edges------------------------------------ //#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic) uint32_t nb_comp = this->components.size(); #ifdef OPENMP #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic) #endif for (uint32_t ind_com = 0; ind_com < 
nb_comp; ind_com++) { VertexDescriptor<T> desc_v; EdgeDescriptor desc_source2v, desc_v2sink, desc_v2source; T cost_B, cost_notB; //the cost of being in B or not B, local for each component if (this->saturated_components[ind_com]) { continue; } for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++) { desc_v = this->components[ind_com][i_ver]; // because of the adjacency structure NEVER access edge (source,v) directly! desc_v2source = boost::edge(desc_v, this->source,this->main_graph).first; desc_source2v = edge_attribute_map(desc_v2source).edge_reverse; //use edge_reverse instead desc_v2sink = boost::edge(desc_v, this->sink,this->main_graph).first; cost_B = 0; cost_notB = 0; if (vertex_attribute_map(desc_v).weight==0) { //no observation - no cut edge_attribute_map(desc_source2v).capacity = 0; edge_attribute_map(desc_v2sink).capacity = 0; continue; } for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { cost_B += 0.5*vertex_attribute_map(desc_v).weight * (pow(centers.centroids[ind_com][0][i_dim],2) - 2 * (centers.centroids[ind_com][0][i_dim] * vertex_attribute_map(desc_v).observation[i_dim])); cost_notB += 0.5*vertex_attribute_map(desc_v).weight * (pow(centers.centroids[ind_com][1][i_dim],2) - 2 * (centers.centroids[ind_com][1][i_dim] * vertex_attribute_map(desc_v).observation[i_dim])); } if (cost_B>cost_notB) { edge_attribute_map(desc_source2v).capacity = cost_B - cost_notB; edge_attribute_map(desc_v2sink).capacity = 0.; } else { edge_attribute_map(desc_source2v).capacity = 0.; edge_attribute_map(desc_v2sink).capacity = cost_notB - cost_B; } } } //----then set the vertex to vertex edges --------------------------------------------- EdgeIterator<T> i_edg, i_edg_end; for (boost::tie(i_edg, i_edg_end) = boost::edges(this->main_graph); i_edg != i_edg_end; ++i_edg) { if (!edge_attribute_map(*i_edg).realEdge) { continue; } if (!edge_attribute_map(*i_edg).isActive) { edge_attribute_map(*i_edg).capacity = edge_attribute_map(*i_edg).weight * 
this->parameter.reg_strenth; } else { edge_attribute_map(*i_edg).capacity = 0; } } } //============================================================================================= //================================= COMPUTE_VALUE ========================================= //============================================================================================= virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & ind_com) override { VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); T total_weight = 0; std::vector<T> compValue(this->dim); std::fill((compValue.begin()),(compValue.end()),0); #ifdef OPENMP #pragma omp parallel for if (this->parameter.parallel) schedule(static) #endif for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver) { total_weight += vertex_attribute_map(this->components[ind_com][ind_ver]).weight; for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { compValue[i_dim] += vertex_attribute_map(this->components[ind_com][ind_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][ind_ver]).weight; } vertex_attribute_map(this->components[ind_com][ind_ver]).in_component = ind_com; } for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { compValue[i_dim] = compValue[i_dim] / total_weight; } for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver) { for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { vertex_attribute_map(this->components[ind_com][ind_ver]).value[i_dim] = compValue[i_dim]; } } return std::pair<std::vector<T>, T>(compValue, total_weight); } //============================================================================================= //================================= COMPUTE_MERGE_GAIN ========================================= //============================================================================================= virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1 , 
const VertexDescriptor<T> & comp2) override { VertexAttributeMap<T> reduced_vertex_attribute_map = boost::get(boost::vertex_bundle, this->reduced_graph); std::vector<T> merge_value(this->dim); T gain = 0; // compute the value obtained by mergeing the two connected components for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { merge_value[i_dim] = (reduced_vertex_attribute_map(comp1).weight * reduced_vertex_attribute_map(comp1).value[i_dim] +reduced_vertex_attribute_map(comp2).weight * reduced_vertex_attribute_map(comp2).value[i_dim]) /(reduced_vertex_attribute_map(comp1).weight +reduced_vertex_attribute_map(comp2).weight); gain += 0.5 * (pow(merge_value[i_dim],2) * (reduced_vertex_attribute_map(comp1).weight +reduced_vertex_attribute_map(comp2).weight) - pow(reduced_vertex_attribute_map(comp1).value[i_dim],2) * reduced_vertex_attribute_map(comp1).weight - pow(reduced_vertex_attribute_map(comp2).value[i_dim],2) * reduced_vertex_attribute_map(comp2).weight); } return std::pair<std::vector<T>, T>(merge_value, gain); } }; }
rhs_term.c
#include "mex.h"
#include "blas.h"
#include "conv2d.h"

#define DEBUG 0

/*
 * OpenMP variant of the DG right-hand-side assembly: for each element k it
 * subtracts the volume term (Dr/Ds differentiation mapped through the
 * geometric factors rx, ry, sx, sy) and adds the lifted surface flux scaled
 * by Js/J.
 * NOTE(review): not called by mexFunction below (the BLAS-based rhs_term is
 * used instead); DG_THREADS is presumably defined in conv2d.h — confirm.
 */
void rhs_parall(size_t Np, size_t K, size_t Nfp, double *Dr, double *Ds,
                double *LIFT, double *rx, double *ry, double *sx, double *sy,
                double *J, double *Js, double *dflux, double *eflux,
                double *gflux, double *rhs) {
  int k;
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
  for (k = 0; k < (int)K; k++) {
    int n, m;
    /* volume term: rhs -= (dE/dx + dG/dy) at each node m of element k */
    for (m = 0; m < (int)Np; m++) {
      double rx_ = rx[k * Np + m];
      double ry_ = ry[k * Np + m];
      double sx_ = sx[k * Np + m];
      double sy_ = sy[k * Np + m];
      double *rhsQ = rhs + (k * Np + m);
      double *E = eflux + k * Np;
      double *G = gflux + k * Np;
      for (n = 0; n < (int)Np; n++) {
        double dr = Dr[n * Np + m];
        double ds = Ds[n * Np + m];
        double dx = rx_ * dr + sx_ * ds; /* d/dx via the chain rule */
        double dy = ry_ * dr + sy_ * ds; /* d/dy via the chain rule */
        rhsQ[0] -= dx * E[n] + dy * G[n];
      }
      /* surface term: lift the face flux back to the volume nodes */
      double j = 1 / J[k * Np + m];
      for (n = 0; n < (int)Nfp; n++) {
        double js = Js[k * Nfp + n];
        double dfs = dflux[k * Nfp + n];
        rhsQ[0] += LIFT[m + n * Np] * dfs * js * j;
      }
    }
  }
  return;
}

/*
 * BLAS-based DG right-hand-side assembly (same result as rhs_parall):
 * rhs -= rx .* (Dr*E) + sx .* (Ds*E) + ry .* (Dr*G) + sy .* (Ds*G)
 * rhs += (LIFT * (Js .* dflux)) ./ J
 * dvecm/dvecd are element-wise multiply/divide helpers from conv2d.h.
 */
void rhs_term(size_t Np_, size_t K_, size_t Nfp_, double *Dr, double *Ds,
              double *LIFT, double *rx, double *ry, double *sx, double *sy,
              double *J, double *Js, double *dflux, double *eflux,
              double *gflux, double *rhs) {
  char *chn = "N";
  double one = 1.0, zero = 0.0;
  mwSignedIndex Np = Np_, K = K_, Nfp = Nfp_;

  double *vtemp = calloc(Np_ * K_, sizeof(double));
  double *stemp = calloc(Nfp_ * K_, sizeof(double));
  /* FIX: the original used both buffers without checking the allocations */
  if (vtemp == NULL || stemp == NULL) {
    free(vtemp);
    free(stemp);
    mexErrMsgIdAndTxt("MATLAB:rhs_term:outOfMemory",
                      "Failed to allocate work buffers.");
  }

  /* volume term, one dgemm + element-wise multiply per geometric factor */
  dgemm(chn, chn, &Np, &K, &Np, &one, Dr, &Np, eflux, &Np, &zero, vtemp, &Np);
  dvecm(Np * K, -1, rx, vtemp, rhs);
  dgemm(chn, chn, &Np, &K, &Np, &one, Ds, &Np, eflux, &Np, &zero, vtemp, &Np);
  dvecm(Np * K, -1, sx, vtemp, rhs);
  dgemm(chn, chn, &Np, &K, &Np, &one, Dr, &Np, gflux, &Np, &zero, vtemp, &Np);
  dvecm(Np * K, -1, ry, vtemp, rhs);
  dgemm(chn, chn, &Np, &K, &Np, &one, Ds, &Np, gflux, &Np, &zero, vtemp, &Np);
  dvecm(Np * K, -1, sy, vtemp, rhs);

  /* surface term: rhs += (LIFT * (Js .* dflux)) ./ J */
  dvecm(Nfp * K, 1, Js, dflux, stemp);
  dgemm(chn, chn, &Np, &K, &Nfp, &one, LIFT, &Np, stemp, &Nfp, &zero, vtemp,
        &Np);
  dvecd(Np * K, 1, vtemp, J, rhs);

  free(vtemp);
  free(stemp);
  return;
}

/**
 * @brief calculate the R.H.S of conv2d problem.
 *
 * Usages:
 *    [ rhsQ ] = rhs_term(f_Q, f_ext, u, v,
 *        nx, ny, eidM, eidP, eidtype, EToR,    % for surface term
 *        Dr, Ds, rx, ry, sx, sy, LIFT, J, Js)  % for rhs term
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
  /* check input */
  if (nrhs != 19) {
    /* FIX: the message said "20 input required." while the check demands 19 */
    mexErrMsgIdAndTxt("MATLAB:rhs_term:invalidNumInputs",
                      "19 input required.");
  } else if (nlhs > 1) {
    mexErrMsgIdAndTxt("MATLAB:rhs_term:maxlhs", "Too many output arguments.");
  }
  double *f_Q = mxGetPr(prhs[0]);
  double *f_extQ = mxGetPr(prhs[1]);
  double *u = mxGetPr(prhs[2]);
  double *v = mxGetPr(prhs[3]);
  double *nx = mxGetPr(prhs[4]);
  double *ny = mxGetPr(prhs[5]);
  double *eidM = mxGetPr(prhs[6]);
  double *eidP = mxGetPr(prhs[7]);
  signed char *eidtype = (signed char *)mxGetData(prhs[8]);
  signed char *EToR = (signed char *)mxGetData(prhs[9]);
  double *Dr = mxGetPr(prhs[10]);
  double *Ds = mxGetPr(prhs[11]);
  double *rx = mxGetPr(prhs[12]);
  double *ry = mxGetPr(prhs[13]);
  double *sx = mxGetPr(prhs[14]);
  double *sy = mxGetPr(prhs[15]);
  double *LIFT = mxGetPr(prhs[16]);
  double *J = mxGetPr(prhs[17]);
  double *Js = mxGetPr(prhs[18]);

  /* get dimensions */
  size_t Np = mxGetM(prhs[0]);
  size_t K = mxGetN(prhs[0]);
  size_t Nfp = mxGetM(prhs[6]);

  /* allocate output array */
  plhs[0] = mxCreateDoubleMatrix((mwSize)Np, (mwSize)K, mxREAL);
  double *rhsQ = mxGetPr(plhs[0]);

  /* work buffers; FIX: the original never checked these allocations */
  double *dflux = calloc(Nfp * K, sizeof(double));
  double *eflux = calloc(Np * K, sizeof(double));
  double *gflux = calloc(Np * K, sizeof(double));
  if (dflux == NULL || eflux == NULL || gflux == NULL) {
    free(dflux);
    free(eflux);
    free(gflux);
    mexErrMsgIdAndTxt("MATLAB:rhs_term:outOfMemory",
                      "Failed to allocate flux buffers.");
  }

  /* surface integral term */
  surf_term(Nfp, K, f_Q, f_extQ, u, v, nx, ny, eidM, eidP, eidtype, EToR,
            dflux);
  /* volume flux term */
  flux_term(Np, K, f_Q, u, v, EToR, eflux, gflux);
  /* DG rhs term */
  rhs_term(Np, K, Nfp, Dr, Ds, LIFT, rx, ry, sx, sy, J, Js, dflux, eflux,
           gflux, rhsQ);

  free(dflux);
  free(eflux);
  free(gflux);
  return;
}
parallel_for_simd_misc_messages.c
// NOTE(review): clang '-verify' diagnostic test for '#pragma omp parallel for simd'.
// Every 'expected-error@+N' / 'expected-note@+N' / 'expected-warning@+N' annotation
// is line-relative to the directive that follows it; the annotations ARE the test,
// so do not reflow, reorder, or insert lines inside the test function bodies.
// RUN: %clang_cc1 -fsyntax-only -fopenmp=libiomp5 -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd foo void test_no_clause() { int i; #pragma omp parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}} #pragma omp parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int
foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel
for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}} #pragma omp parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}} #pragma omp parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}} #pragma omp parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2
{{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel #pragma omp parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for
(int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}} #pragma omp parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}} #pragma omp parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}} #pragma omp parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected
expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp parallel for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined
as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp parallel for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp parallel for simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp parallel for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp parallel for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp parallel for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; //
expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable
cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; 
#pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i 
< 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } }
SpatialAdaptiveAveragePooling.c
/*
 * Adaptive average pooling over the innermost spatial dims of a 3D
 * (D x H x W) or 4D (B x D x H x W) tensor, instantiated per scalar type
 * via the TH_GENERIC_FILE mechanism ('real', 'THTensor_', 'THNN_' are
 * macro-expanded).
 *
 * Each output cell (oh, ow) averages the half-open input window
 * [START_IND, END_IND) along H and W, so windows may overlap or differ in
 * size when osize does not evenly divide isize.
 *
 * - updateOutput_frame / updateOutput: forward pass; resizes 'output' and
 *   fills it with per-window averages, walking the input via explicit
 *   strides (istrideD/H/W), so non-contiguous input is handled.
 * - updateGradInput_frame / updateGradInput: backward pass; accumulates
 *   each output gradient uniformly (grad / kH / kW) over its input window.
 *   NOTE(review): the backward frame indexes gradInput with isizeW/isizeH
 *   directly (no strides), i.e. it assumes gradInput is contiguous --
 *   presumably guaranteed by the resizeAs+zero below; confirm if reused.
 *
 * Parallelism: the feature loop (d) and the batch loop (b) are
 * OpenMP-parallel; work inside a frame is single-threaded per d.
 */
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialAdaptiveAveragePooling.c" #else #define START_IND(a,b,c) (int)floor((float)(a * c) / b) #define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b) // #define START_IND(a,b,c) a * c / b // #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0 // 4d tensor B x D x H x W static void THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)( real *input_p, real *output_p, int64_t sizeD, int64_t isizeH, int64_t isizeW, int64_t osizeH, int64_t osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { int64_t d; #pragma omp parallel for private(d) for (d = 0; d < sizeD; d++) { /* loop over output */ int64_t oh, ow; for(oh = 0; oh < osizeH; oh++) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = 0; ow < osizeW; ow++) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; /* local pointers */ real *ip = input_p + d*istrideD + istartH*istrideH + istartW*istrideW; real *op = output_p + d*osizeH*osizeW + oh*osizeW + ow; /* compute local average: */ real sum = 0; int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { real val = *(ip + ih*istrideH + iw*istrideW); sum += val; } } /* set output to local average */ *op = sum / kW / kH; } } } } void THNN_(SpatialAdaptiveAveragePooling_updateOutput)( THNNState *state, THTensor *input, THTensor *output, int osizeW, int osizeH) { int dimD = 0; int dimH = 1; int dimW = 2; int64_t sizeB = 1; int64_t sizeD = 0; int64_t isizeH = 0; int64_t isizeW = 0; int64_t istrideB = 0; int64_t istrideD = 0; int64_t istrideH = 0; int64_t istrideW = 0; real *input_data = nullptr; real *output_data = nullptr; THNN_ARGCHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s"); if (input->dim() == 4) { istrideB = input->stride(0); sizeB = 
input->size(0); dimD++; dimH++; dimW++; } /* sizes */ sizeD = input->size(dimD); isizeH = input->size(dimH); isizeW = input->size(dimW); /* strides */ istrideD = input->stride(dimD); istrideH = input->stride(dimH); istrideW = input->stride(dimW); /* resize output */ if (input->dim() == 3) { THTensor_(resize3d)(output, sizeD, osizeH, osizeW); input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data, output_data, sizeD, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); } else { int64_t b; THTensor_(resize4d)(output, sizeB, sizeD, osizeH, osizeW); input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); #pragma omp parallel for private(b) for (b = 0; b < sizeB; b++) { THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data+b*istrideB, output_data+b*sizeD*osizeH*osizeW, sizeD, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); } } } static void THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)( real *gradInput_p, real *gradOutput_p, int64_t sizeD, int64_t isizeH, int64_t isizeW, int64_t osizeH, int64_t osizeW) { int64_t d; #pragma omp parallel for private(d) for (d = 0; d < sizeD; d++) { real *gradInput_p_d = gradInput_p + d*isizeW*isizeH; real *gradOutput_p_d = gradOutput_p + d*osizeW*osizeH; /* calculate average */ int64_t oh, ow; for(oh = 0; oh < osizeH; oh++) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = 0; ow < osizeW; ow++) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; real grad_delta = gradOutput_p_d[oh*osizeW +ow] / kH / kW; int ih, iw; for(ih = istartH; ih < iendH; ih++) { for(iw = istartW; iw < iendW; iw++) { /* update gradient */ gradInput_p_d[ih*isizeW + iw] += grad_delta; } } } } } } void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)( THNNState *state, 
THTensor *input, THTensor *gradOutput, THTensor *gradInput) { int dimD = 0; int dimH = 1; int dimW = 2; int64_t sizeB = 1; int sizeD; int isizeH; int isizeW; int osizeH; int osizeW; real *gradInput_data; real *gradOutput_data; /* get contiguous gradOutput */ gradOutput = THTensor_(newContiguous)(gradOutput); /* resize */ THTensor_(resizeAs)(gradInput, input); THTensor_(zero)(gradInput); if (input->dim() == 4) { sizeB = input->size(0); dimD++; dimH++; dimW++; } /* sizes */ sizeD = input->size(dimD); isizeH = input->size(dimH); isizeW = input->size(dimW); osizeH = gradOutput->size(dimH); osizeW = gradOutput->size(dimW); /* get raw pointers */ gradInput_data = THTensor_(data)(gradInput); gradOutput_data = THTensor_(data)(gradOutput); /* backprop */ if (input->dim() == 3) { THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data, gradOutput_data, sizeD, isizeH, isizeW, osizeH, osizeW); } else { int64_t b; #pragma omp parallel for private(b) for (b = 0; b < sizeB; b++) { THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data+b*sizeD*isizeH*isizeW, gradOutput_data+b*sizeD*osizeH*osizeW, sizeD, isizeH, isizeW, osizeH, osizeW); } } /* cleanup */ THTensor_(free)(gradOutput); } #endif #undef START_IND #undef END_IND
test_verify_rowcols.c
#include "config.h"

#include <ctype.h>   /* FIX: isprint() is used in main(); was implicitly declared */
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include "kseq.h"
KSEQ_INIT(int, read)

#include "parasail.h"
#include "parasail/cpuid.h"
#include "parasail/memory.h"
#include "parasail/matrix_lookup.h"

#include "func_verify_rowcols.h"

static int verbose = 0;

/* a single gap-penalty configuration (open/extend) */
typedef struct gap_score {
    int open;
    int extend;
} gap_score_t;

/* gap configurations tried for every function; {INT_MIN,INT_MIN} terminates */
gap_score_t gap_scores[] = {
    {10,1},
    {10,2},
    {14,2},
    {40,2},
    {INT_MIN,INT_MIN}
};

/*
 * Reads all sequences from the FASTA/FASTQ file `filename` (via kseq) into a
 * freshly allocated array of C strings plus a parallel array of lengths.
 * Exits on any I/O or allocation failure.  Caller owns the returned arrays
 * and each string.
 */
static inline void parse_sequences(
        const char *filename, char ***strings_,
        unsigned long **sizes_, unsigned long *count_)
{
    FILE* fp;
    kseq_t *seq = NULL;
    int l = 0;
    char **strings = NULL;
    unsigned long *sizes = NULL;
    unsigned long count = 0;
    unsigned long memory = 1000;  /* initial capacity; doubled on demand */

    fp = fopen(filename, "r");
    if(fp == NULL) {
        perror("fopen");
        exit(1);
    }
    strings = malloc(sizeof(char*) * memory);
    if (NULL == strings) {       /* FIX: was unchecked, unlike later reallocs */
        perror("malloc");
        exit(1);
    }
    sizes = malloc(sizeof(unsigned long) * memory);
    if (NULL == sizes) {         /* FIX: was unchecked, unlike later reallocs */
        perror("malloc");
        exit(1);
    }
    seq = kseq_init(fileno(fp));
    while ((l = kseq_read(seq)) >= 0) {
        strings[count] = strdup(seq->seq.s);
        if (NULL == strings[count]) {
            perror("strdup");
            exit(1);
        }
        sizes[count] = seq->seq.l;
        ++count;
        /* grow both arrays together when capacity is reached */
        if (count >= memory) {
            char **new_strings = NULL;
            unsigned long *new_sizes = NULL;
            memory *= 2;
            new_strings = realloc(strings, sizeof(char*) * memory);
            if (NULL == new_strings) {
                perror("realloc");
                exit(1);
            }
            strings = new_strings;
            new_sizes = realloc(sizes, sizeof(unsigned long) * memory);
            if (NULL == new_sizes) {
                perror("realloc");
                exit(1);
            }
            sizes = new_sizes;
        }
    }
    kseq_destroy(seq);
    fclose(fp);

    *strings_ = strings;
    *sizes_ = sizes;
    *count_ = count;
}

/*
 * n choose k without overflowing intermediates where possible.
 * From http://blog.plover.com/math/choose.html
 */
static inline unsigned long binomial_coefficient(
        unsigned long n, unsigned long k)
{
    unsigned long r = 1;
    unsigned long d;
    if (k > n) {
        return 0;
    }
    for (d = 1; d <= k; d++) {
        r *= n--;
        r /= d;
    }
    return r;
}

/*
 * Maps a linear pair index `pos` to the unordered pair (a,b), a < b,
 * enumerating all 2-combinations without materializing them.
 */
static inline void k_combination2(
        unsigned long pos, unsigned long *a, unsigned long *b)
{
    double s;
    double i = floor(sqrt(2.0 * pos)) - 1.0;
    if (i <= 1.0) {
        i = 1.0;
    }
    s = i * (i - 1.0) / 2.0;
    while (pos - s >= i) {
        s += i;
        i += 1;
    }
    *a = (unsigned long)(pos - s);
    *b = (unsigned long)(i);
}

/* returns nonzero iff arrays a and b differ anywhere in [0,size) */
static inline int diff_array(
        unsigned long size, int *a, int *b)
{
    unsigned long i = 0;
    for (i=0; i<size; ++i) {
        if (a[i] != b[i]) return 1;
    }
    return 0;
}

/*
 * Runs every non-reference function in group `f` against the reference
 * (functions[0]) over all sequence pairs, all matrices (or the single
 * user-specified one) and all gap configurations (or the single
 * user-specified one), reporting score / row / col mismatches.
 */
static void check_functions(
        parasail_function_group_t f,
        char **sequences,
        unsigned long *sizes,
        unsigned long pair_limit,
        const parasail_matrix_t *matrix_,
        gap_score_t gap)
{
    const parasail_function_info_t *functions = f.fs;
    unsigned long matrix_index = 0;
    unsigned long gap_index = 0;
    unsigned long function_index = 0;
    unsigned long pair_index = 0;
    parasail_function_t *reference_function = NULL;
    const parasail_matrix_t ** matrices = parasail_matrices;
    const parasail_matrix_t * single_matrix[] = {
        matrix_, NULL
    };

    if (NULL != matrix_) {
        matrices = single_matrix;
    }

    printf("checking %s functions\n", f.name);
    for (matrix_index=0; NULL!=matrices[matrix_index]; ++matrix_index) {
        const parasail_matrix_t *matrix = matrices[matrix_index];
        const char *matrixname = matrix->name;
        if (verbose) printf("\t%s\n", matrixname);
        for (gap_index=0; INT_MIN!=gap_scores[gap_index].open; ++gap_index) {
            int open = gap_scores[gap_index].open;
            int extend = gap_scores[gap_index].extend;
            if (gap.open != INT_MIN && gap.extend != INT_MIN) {
                /* user-specified gap overrides the table */
                open = gap.open;
                extend = gap.extend;
            }
            if (verbose) printf("\t\topen=%d extend=%d\n", open, extend);
            reference_function = functions[0].pointer;
            for (function_index=1;
                    NULL!=functions[function_index].pointer;
                    ++function_index) {
                if (verbose) printf("\t\t\t%s\n", functions[function_index].name);
                unsigned long saturated = 0;
#pragma omp parallel for
                for (pair_index=0; pair_index<pair_limit; ++pair_index) {
                    parasail_result_t *reference_result = NULL;
                    parasail_result_t *result = NULL;
                    unsigned long a = 0;
                    unsigned long b = 1;
                    k_combination2(pair_index, &a, &b);
                    //printf("\t\t\t\tpair=%lu (%lu,%lu)\n", pair_index, a, b);
                    reference_result = reference_function(
                            sequences[a], sizes[a],
                            sequences[b], sizes[b],
                            open, extend,
                            matrix);
                    result = functions[function_index].pointer(
                            sequences[a], sizes[a],
                            sequences[b], sizes[b],
                            open, extend,
                            matrix);
                    if (result->saturated) {
                        /* no point in comparing a result that saturated */
                        parasail_result_free(reference_result);
                        parasail_result_free(result);
#pragma omp atomic
                        saturated += 1;
                        continue;
                    }
                    if (reference_result->score != result->score) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) wrong score (%d!=%d)\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname,
                                    reference_result->score, result->score);
                        }
                    }
                    if (diff_array(
                                sizes[b],
                                reference_result->score_row,
                                result->score_row)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad score row\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    if (diff_array(
                                sizes[a],
                                reference_result->score_col,
                                result->score_col)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad score col\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    /* the stats rows/cols only exist for stats variants,
                     * hence the NULL guards below */
                    if (reference_result->matches_row && diff_array(
                                sizes[b],
                                reference_result->matches_row,
                                result->matches_row)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad matches row\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    if (reference_result->matches_col && diff_array(
                                sizes[a],
                                reference_result->matches_col,
                                result->matches_col)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad matches col\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    if (reference_result->similar_row && diff_array(
                                sizes[b],
                                reference_result->similar_row,
                                result->similar_row)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad similar row\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    if (reference_result->similar_col && diff_array(
                                sizes[a],
                                reference_result->similar_col,
                                result->similar_col)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad similar col\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    if (reference_result->length_row && diff_array(
                                sizes[b],
                                reference_result->length_row,
                                result->length_row)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad length row\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    if (reference_result->length_col && diff_array(
                                sizes[a],
                                reference_result->length_col,
                                result->length_col)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad length col\n",
                                    functions[function_index].name,
                                    a, b, open, extend, matrixname);
                        }
                    }
                    parasail_result_free(reference_result);
                    parasail_result_free(result);
                }
                if (verbose && saturated) {
                    printf("%s %d %d %s saturated %lu times\n",
                            functions[function_index].name,
                            open, extend, matrixname, saturated);
                }
            }
            if (gap.open != INT_MIN && gap.extend != INT_MIN) {
                /* user-specified gap, don't loop */
                break;
            }
        }
    }
}

/*
 * Parses options (-f file, -m matrix, -n count, -o/-e gap, -v verbose,
 * -s enable stats, -S disable score checks), loads the sequences, then
 * verifies every available rowcol implementation (per detected ISA)
 * against its reference.
 */
int main(int argc, char **argv)
{
    unsigned long i = 0;
    unsigned long seq_count = 0;
    unsigned long limit = 0;
    char **sequences = NULL;
    unsigned long *sizes = NULL;
    char *endptr = NULL;
    char *filename = NULL;
    int c = 0;
    int test_scores = 1;
    int test_stats = 0;
    char *matrixname = NULL;
    const parasail_matrix_t *matrix = NULL;
    gap_score_t gap = {INT_MIN,INT_MIN};

    while ((c = getopt(argc, argv, "f:m:n:o:e:vsS")) != -1) {
        switch (c) {
            case 'f':
                filename = optarg;
                break;
            case 'm':
                matrixname = optarg;
                break;
            case 'n':
                errno = 0;
                seq_count = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol");
                    exit(1);
                }
                break;
            case 'o':
                errno = 0;
                gap.open = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.open");
                    exit(1);
                }
                break;
            case 'e':
                errno = 0;
                gap.extend = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.extend");
                    exit(1);
                }
                break;
            case 'v':
                verbose = 1;
                break;
            case 's':
                test_stats = 1;
                break;
            case 'S':
                test_scores = 0;
                break;
            case '?':
                if (optopt == 'f' || optopt == 'n') {
                    fprintf(stderr,
                            "Option -%c requires an argument.\n",
                            optopt);
                }
                else if (isprint(optopt)) {
                    fprintf(stderr, "Unknown option `-%c'.\n",
                            optopt);
                }
                else {
                    fprintf(stderr,
                            "Unknown option character `\\x%x'.\n",
                            optopt);
                }
                exit(1);
            default:
                fprintf(stderr, "default case in getopt\n");
                exit(1);
        }
    }

    if (filename) {
        parse_sequences(filename, &sequences, &sizes, &seq_count);
    }
    else {
        fprintf(stderr, "no filename specified\n");
        exit(1);
    }

    /* select the matrix */
    if (matrixname) {
        matrix = parasail_matrix_lookup(matrixname);
        if (NULL == matrix) {
            fprintf(stderr, "Specified substitution matrix not found.\n");
            exit(1);
        }
    }

    limit = binomial_coefficient(seq_count, 2);
    printf("%lu choose 2 is %lu\n", seq_count, limit);

#if HAVE_SSE2
    if (parasail_can_use_sse2()) {
        if (test_scores) {
            check_functions(parasail_nw_rowcol_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_rowcol_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_rowcol_sse2, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_rowcol_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_rowcol_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_rowcol_sse2, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_SSE41
    if (parasail_can_use_sse41()) {
        if (test_scores) {
            check_functions(parasail_nw_rowcol_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_rowcol_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_rowcol_sse41, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_rowcol_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_rowcol_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_rowcol_sse41, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_AVX2
    if (parasail_can_use_avx2()) {
        if (test_scores) {
            check_functions(parasail_nw_rowcol_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_rowcol_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_rowcol_avx2, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_rowcol_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_rowcol_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_rowcol_avx2, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_KNC
    {
        if (test_scores) {
            check_functions(parasail_nw_rowcol_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_rowcol_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_rowcol_knc, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_rowcol_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_rowcol_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_rowcol_knc, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

    /* dispatcher variants are always available */
    if (test_scores) {
        check_functions(parasail_nw_rowcol_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sg_rowcol_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sw_rowcol_disp, sequences, sizes, limit, matrix, gap);
    }
    if (test_stats) {
        check_functions(parasail_nw_stats_rowcol_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sg_stats_rowcol_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sw_stats_rowcol_disp, sequences, sizes, limit, matrix, gap);
    }

    for (i=0; i<seq_count; ++i) {
        free(sequences[i]);
    }
    free(sequences);
    free(sizes);

    return 0;
}
example_06-StructOfArrays-CellLinkedList-InnerOmp.c
/* * SPDX-License-Identifier: BSD-3-Clause * * example_06-StructOfArrays-CellLinkedList-InnerOmp.c : * Example of SPH Density Calculation using * fast neighbor search the main density loop via * Cell Linked List method, Struct of Arrays (SoA) * data layout, OpenMP parallelization at the * chunk level, no SIMD directives. * * (C) Copyright 2021 José Hugo Elsas * Author: José Hugo Elsas <jhelsas@gmail.com> * * Command Line Options: * -runs <int> : Set the number of repetitions (runs) for * calculating the density. The value of * the density is based on the last * iteration. * Default value: 1 * -run_seed <int>: Flag to set an alternative seed use for * for the PRNG. Instead of feeding seed * to the PRNG directly, it feeds * seed + iteration, as to generate different * configurations for each iteration. * Default value: 0 - (possible 0/1) * -seed <int>: Set the seed to use for the SPH particles * uniform position generation in the box * Default value: 123123123 * * -N <int>: Set the number of SPH particles to be used * Default value: 1e5 = 100,000 * -h <float>: Set the value of the smoothing kernel * parameter h, which corresponds to half * of the support of the kernel. 
* Default value: 0.05 * * -Nx <int>: Set the number of Cells in the X direction * Default value: 10 * -Ny <int>: Set the number of Cells in the Y direction * Default value: 10 * -Nz <int>: Set the number of Cells in the Z direction * Default value: 10 * * -Xmin <float>: Set the lower bound in the X direction for * the Cell Linked List box * Default value: 0.0 * -Ymin <float>: Set the lower bound in the Y direction for * the Cell Linked List box * Default value: 0.0 * -Ymin <float>: Set the lower bound in the Z direction for * the Cell Linked List box * Default value: 0.0 * * -Xmax <float>: Set the lower bound in the X direction for * the Cell Linked List box * Default value: 1.0 * -Ymax <float>: Set the lower bound in the Y direction for * the Cell Linked List box * Default value: 1.0 * -Zmax <float>: Set the lower bound in the Z direction for * the Cell Linked List box * Default value: 1.0 */ #include <math.h> #include <ctype.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <limits.h> #include <unistd.h> #include <stdbool.h> #include <sys/time.h> #include <inttypes.h> #include <omp.h> #include <gsl/gsl_math.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_heapsort.h> #include "sph_data_types.h" #include "sph_linked_list.h" #include "sph_utils.h" #ifndef M_PI #define M_PI (3.14159265358979323846) #endif #define COMPUTE_BLOCKS 5 int main_loop(int run, bool run_seed, int64_t N, double h, long int seed, void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times); int compute_density_3d_innerOmp(int N, double h, SPHparticle *lsph, linkedListBox *box); int compute_density_3d_chunk(int64_t node_begin, int64_t node_end, int64_t nb_begin, int64_t nb_end,double h, double* restrict x, double* restrict y, double* restrict z, double* restrict nu, double* restrict rho); double w_bspline_3d(double r,double h); int main(int argc, char **argv){ bool run_seed = false; // By default the behavior is is to use the same seed int 
runs = 1,err; // it only runs once long int seed = 123123123; // The default seed is 123123123 int64_t N = 100000; // The default number of particles is N = 1e5 = 100,000 double h=0.05; // The default kernel smoothing length is h = 0.05 linkedListBox *box; // Uninitialized Box containing the cells for the cell linked list method SPHparticle *lsph; // Uninitialized array of SPH particles box = (linkedListBox*)malloc(1*sizeof(linkedListBox)); // Create a box representing the entire 3d domain // allow for command line customization of the run arg_parse(argc,argv,&N,&h,&seed,&runs,&run_seed,box); // Parse the command line options // line arguments and override default values err = SPHparticle_SoA_malloc(N,&lsph); if(err) fprintf(stderr,"error in SPHparticle_SoA_malloc\n"); void *swap_arr = malloc(N*sizeof(double)); double times[runs*COMPUTE_BLOCKS]; for(int run=0;run<runs;run+=1) main_loop(run,run_seed,N,h,seed,swap_arr,box,lsph,times); bool is_cll = true; const char *prefix = "ex06,cll,SoA,inner"; print_time_stats(prefix,is_cll,N,h,seed,runs,lsph,box,times); print_sph_particles_density(prefix,is_cll,N,h,seed,runs,lsph,box); SPHparticleSOA_safe_free(N,&lsph); safe_free_box(box); free(swap_arr); return 0; } /* * Function main_loop: * Runs the main loop of the program, including the particle array generation, * density calculation and the timings annotations. 
 *
 * Arguments:
 *    run <int> : index (or value) of the present iteration
 *    run_seed <bool> : boolean defining whether to use run index for seed or not
 *    N <int> : Number of SPH particles to be used in the run
 *    h <double> : Smoothing Length for the Smoothing Kernel w_bspline
 *    seed <long int> : seed for GSL PRNG generator to generate particle positions
 *    box <linkedListBox> : Box of linked list cells, encapsulating the 3d domain
 *    lsph <SPHparticle> : Array (pointer) of SPH particles to be updated
 *    times <double> : Array to store the computation timings to be updated
 * Returns:
 *    0 : error code returned
 *    lsph <SPHparticle> : SPH particle array is updated in the rho field by reference
 *    times <double> : Times is updated by reference
 */
int main_loop(int run, bool run_seed, int64_t N, double h, long int seed,
              void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times)
{
  int err;

  if(run_seed)
    err = gen_unif_rdn_pos_box(N,seed+run,box,lsph);   // per-run seed: a new configuration each run
  else
    err = gen_unif_rdn_pos_box(N,seed,box,lsph);       // fixed seed: the same configuration each run

  if(err)
    fprintf(stderr,"error in gen_unif_rdn_pos\n");

  // ------------------------------------------------------ //

  double t0,t1,t2,t3,t4,t5;

  t0 = omp_get_wtime();

  err = compute_hash_MC3D(N,lsph,box);                    // Compute Morton Z 3D hash based on the
  if(err)                                                 // cell index for each of the X, Y and Z
    fprintf(stderr,"error in compute_hash_MC3D\n");       // directions, in which a given particle reside

  t1 = omp_get_wtime();

  qsort(lsph->hash,N,2*sizeof(int64_t),compare_int64_t);  // Sort the Particle Hashes, getting the shuffled
                                                          // index necessary to re-shuffle the remaining arrays
                                                          // (element width is 2*int64_t — presumably
                                                          //  (hash, index) pairs; defined elsewhere)

  t2 = omp_get_wtime();

  err = reorder_lsph_SoA(N,lsph,swap_arr);                // Reorder all arrays according to the sorted hash,
  if(err)                                                 // As to have a quick way to retrieve a cell
    fprintf(stderr,"error in reorder_lsph_SoA\n");        // given its hash.

  t3 = omp_get_wtime();

  err = setup_interval_hashtables(N,lsph,box);            // Annotate the begining and end of each cell
  if(err)                                                 // on the cell linked list method for fast
    fprintf(stderr,"error in setup_interval_hashtables\n"); // neighbor search

  t4 = omp_get_wtime();

  err = compute_density_3d_innerOmp(N,h,lsph,box);        // Compute the density of the particles based
  if(err)                                                 // on the cell linked list method for fast
    fprintf(stderr,"error in compute_density\n");         // neighbor search

  // ------------------------------------------------------ //

  t5 = omp_get_wtime();

  times[COMPUTE_BLOCKS*run+0] = t1-t0;  // Time for compute morton Z 3d hash
  times[COMPUTE_BLOCKS*run+1] = t2-t1;  // Time for sorting the particles' hashes
  times[COMPUTE_BLOCKS*run+2] = t3-t2;  // Time for reordering all other arrays accordingly
  times[COMPUTE_BLOCKS*run+3] = t4-t3;  // Time for setting up the interval hash tables
  times[COMPUTE_BLOCKS*run+4] = t5-t4;  // Time for computing the SPH particle densities

  return 0;
}

/*
 * Function compute_density_3d_innerOmp:
 *   Computes the SPH density from the particles using cell linked list,
 *   with parallelization at the level of the outer-most loop of the chunk
 *
 * Arguments:
 *    N <int> : Number of SPH particles to be used in the run
 *    h <double> : Smoothing Length for the Smoothing Kernel w_bspline
 *    lsph <SPHparticle> : Array (pointer) of SPH particles to be updated
 * Returns:
 *    0 : error code returned
 *    lsph <SPHparticle> : SPH particle array is updated in the rho field by reference
 */
int compute_density_3d_innerOmp(int N, double h, SPHparticle *lsph, linkedListBox *box){
  khiter_t kbegin,kend;
  int64_t node_hash=-1,node_begin=0, node_end=0;  // Start initializing the node indexes on the array
  int64_t nb_begin= 0, nb_end = 0;                // initialize the neighbor indexes
  int64_t nblist[(2*box->width+1)*(2*box->width+1)*(2*box->width+1)]; // prepare a list of potential neighbor

  memset(lsph->rho,(int)0,N*sizeof(double));      // Pre-initialize the density to zero
  for (kbegin = kh_begin(box->hbegin); kbegin != kh_end(box->hbegin); kbegin++){ // Iterate over each receiver cell begin index
    if (kh_exist(box->hbegin, kbegin)){                        // verify if that given iterator actually exists
      kend = kh_get(1, box->hend, kh_key(box->hbegin, kbegin));// Then get the end of the receiver cell iterator
      node_hash = kh_key(box->hbegin, kbegin);                 // Then get the hash corresponding to it
      node_begin = kh_value(box->hbegin, kbegin);              // Get the receiver cell begin index in the array
      node_end = kh_value(box->hend, kend);                    // Get the receiver cell end index in the array

      neighbour_hash_3d(node_hash,nblist,box->width,box);      // then find the hashes of its neighbors
      for(int j=0;j<(2*box->width+1)*(2*box->width+1)*(2*box->width+1);j+=1){ // and the iterate over them
        if(nblist[j]>=0){                                      // if a given neighbor actually has particles
          // NOTE(review): kh_get results are not checked against kh_end here;
          // assumes neighbour_hash_3d only emits hashes present in both
          // tables — TODO confirm
          nb_begin = kh_value(box->hbegin, kh_get(0, box->hbegin, nblist[j]) ); // then get the contributing cell begin index
          nb_end   = kh_value(box->hend  , kh_get(1, box->hend  , nblist[j]) ); // and get the contributing cell end index

          compute_density_3d_chunk(node_begin,node_end,nb_begin,nb_end,h, // and compute the density contribution from
                                   lsph->x,lsph->y,lsph->z,lsph->nu,lsph->rho); // the contributing cell to the receiver cell
        }
      }
    }
  }

  return 0;
}

/*
 * Function compute_density_3d_chunk:
 *   Computes the SPH density contribution for a pair of cells, from nb_ indexes
 *   to the node_ indexes. The computation is performed in parallel at the
 *   level of the node_ index, the outer-most, but without vectorization.
 *
 * Arguments:
 *    node_begin <int> : Begin index of the receiver cell
 *    node_end <int> : End index of the receiver cell
 *    nb_begin <int> : Begin index of the sender (neighbor) cell
 *    nb_end <int> : End index of the sender (neighbor) cell
 *    h <double> : Smoothing Length for the Smoothing Kernel w_bspline
 *    x <double*> : Array of particles' X positions
 *    y <double*> : Array of particles' Y positions
 *    z <double*> : Array of particles' Z positions
 *    nu <double*> : Array of particles' density weights (i.e. masses)
 * Returns:
 *    0 : error code returned
 *    rho <double*> : Array of particles' densities
 */
int compute_density_3d_chunk(int64_t node_begin, int64_t node_end,
                             int64_t nb_begin, int64_t nb_end,double h,
                             double* restrict x, double* restrict y,
                             double* restrict z, double* restrict nu,
                             double* restrict rho){
  #pragma omp parallel for                    // Execute the outer loop in parallel
  for(int64_t ii=node_begin;ii<node_end;ii+=1){ // Iterate over the ii index of the chunk
    double xii = x[ii];                       // Load the X component of the ii particle position
    double yii = y[ii];                       // Load the Y component of the ii particle position
    double zii = z[ii];                       // Load the Z component of the ii particle position
    double rhoii = 0.0;                       // Initialize the chunk contribution to density

    for(int64_t jj=nb_begin;jj<nb_end;jj+=1){ // Iterate over each other particle in jj loop
      double q = 0.;                          // Initialize the distance
      double xij = xii-x[jj];                 // Load and subtract jj particle's X position component
      double yij = yii-y[jj];                 // Load and subtract jj particle's Y position component
      double zij = zii-z[jj];                 // Load and subtract jj particle's Z position component

      q += xij*xij;                           // Add the jj contribution to the ii distance in X
      q += yij*yij;                           // Add the jj contribution to the ii distance in Y
      q += zij*zij;                           // Add the jj contribution to the ii distance in Z

      q = sqrt(q);                            // Sqrt to compute the distance

      rhoii += nu[jj]*w_bspline_3d(q,h);      // Add up the contribution from the jj particle
    }                                         // to the intermediary density and then
    rho[ii] += rhoii;                         // add the intermediary density to the full density
  }

  return 0;
}

/*
 * Function w_bspline_3d:
 *   Returns the normalized value of the cubic b-spline SPH smoothing kernel
 *
 * Arguments:
 *    q <double> : Distance between particles
 *    h <double> : Smoothing Length for the Smoothing Kernel w_bspline
 * Returns:
 *    wq <double> : Normalized value of the kernel
 */
double w_bspline_3d(double r,double h){
  const double A_d = 3./(2.*M_PI*h*h*h);      // The 3d normalization constant
  double q=0.;                                // normalized distance, initialized to zero

  if(r<0||h<=0.)                              // If either distance or smoothing length
    exit(10);                                 // are negative, declare an emergency

  q = r/h;                                    // Compute the normalized distance
  if(q<=1)                                    // If the distance is small
    return A_d*(2./3.-q*q + q*q*q/2.0);       // Compute this first polynomal
  else if((1.<=q)&&(q<2.))                    // If the distance is a bit larger
    return A_d*(1./6.)*(2.-q)*(2.-q)*(2.-q);  // Compute this other polynomial
  else                                        // Otherwise, if the distance is large
    return 0.;                                // The value of the kernel is 0
}
pooling_pack_x86.h
#include <emmintrin.h> #include <stdio.h> #include <assert.h> #include "pooling_param.h" #define POOL_GENERIC 0 #define POOL_K2S2 1 #define POOL_K3S2 2 #define POOL_K3S1 3 typedef void (*pooling_kernel_t)(const void* input, void* output, int inc, int inh, int inw, int outh, int outw, int, int, int, int, int, int, int pad_h1, int pad_w1, int); static inline float max(float a, float b) { if (a > b) return a; else return b; } static void avg_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { if (pad_w1 > 0) { outw--; } if (pad_h1 > 0) { outh--; } int loopw = (inw - 1) >> 1; int looph = (inh - 1) >> 1; int remain_w = inw - outw * 2; __m128 scalar_025 = _mm_set1_ps(0.25f); __m128 scalar_05 = _mm_set1_ps(0.5f); if (inw % 2 == 0) { remain_w = 1; } else { remain_w = 0; } const float* line0 = input; const float* line1; float* out_ptr = output; __m128 line00; __m128 line01; __m128 line10; __m128 line11; __m128 sum0; __m128 sum1; __m128 sum; line00 = _mm_loadu_ps(line0); if (is_caffe == 1) { line00 = _mm_mul_ps(line00, scalar_025); } _mm_storeu_ps(out_ptr, line00); line0 += 4; out_ptr += 4; for (int i = 0; i < loopw; i++) { line00 = _mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); sum0 = _mm_add_ps(line00, line01); if (is_caffe == 0) { sum0 = _mm_mul_ps(sum0, scalar_05); } else { sum0 = _mm_mul_ps(sum0, scalar_025); } _mm_storeu_ps(out_ptr, sum0); out_ptr += 4; line0 += 8; } if (inw % 2 == 0) { line00 = _mm_loadu_ps(line0); if (is_caffe == 1) { line00 = _mm_mul_ps(line00, scalar_025); } _mm_storeu_ps(out_ptr, line00); out_ptr += 4; } line0 += remain_w * 4; line1 = line0 + inw * 4; for (int i = 0; i < looph; i++) { line00 = _mm_loadu_ps(line0); line10 = _mm_loadu_ps(line1); sum = _mm_add_ps(line00, line10); if (is_caffe == 0) { sum = _mm_mul_ps(sum, scalar_05); } else { sum = _mm_mul_ps(sum, scalar_025); } _mm_storeu_ps(out_ptr, 
sum); out_ptr += 4; line0 += 4; line1 += 4; for (int i = 0; i < loopw; i++) { line00 = _mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); line10 = _mm_loadu_ps(line1); line11 = _mm_loadu_ps(line1 + 4); sum0 = _mm_add_ps(line00, line01); sum1 = _mm_add_ps(line10, line11); sum = _mm_add_ps(sum0, sum1); sum = _mm_mul_ps(sum, scalar_025); _mm_storeu_ps(out_ptr, sum); out_ptr += 4; line0 += 8; line1 += 8; } if (inw % 2 == 0) { line00 = _mm_loadu_ps(line0); line10 = _mm_loadu_ps(line1); sum = _mm_add_ps(line00, line10); if (is_caffe == 0) { sum = _mm_mul_ps(sum, scalar_05); } else { sum = _mm_mul_ps(sum, scalar_025); } _mm_storeu_ps(out_ptr, sum); out_ptr += 4; } line0 += (inw + remain_w) * 4; line1 += (inw + remain_w) * 4; } if (inh % 2 == 0) { line00 = _mm_loadu_ps(line0); if (is_caffe == 1) { line00 = _mm_mul_ps(line00, scalar_025); } _mm_storeu_ps(out_ptr, line00); out_ptr += 4; line0 += 4; for (int i = 0; i < loopw; i++) { line00 = _mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); sum = _mm_add_ps(line00, line01); if (is_caffe == 0) { sum0 = _mm_mul_ps(sum0, scalar_05); } else { sum0 = _mm_mul_ps(sum0, scalar_025); } _mm_storeu_ps(out_ptr, sum); line0 += 8; out_ptr += 4; } if (inw % 2 == 0) { line00 = _mm_loadu_ps(line0); if (is_caffe == 1) { line00 = _mm_mul_ps(line00, scalar_025); } _mm_storeu_ps(out_ptr, line00); out_ptr += 4; } } } static void max_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { if (pad_w1 > 0) { outw--; } if (pad_h1 > 0) { outh--; } int loopw = (inw - 1) >> 1; int looph = (inh - 1) >> 1; int remain_w = inw - outw * 2; if (inw % 2 == 0) { remain_w = 1; } else { remain_w = 0; } const float* line0 = input; const float* line1; float* out_ptr = output; __m128 line00; __m128 line01; __m128 line10; __m128 line11; __m128 max0; __m128 max1; __m128 max; line00 = _mm_loadu_ps(line0); _mm_storeu_ps(out_ptr, 
                  line00);
    line0 += 4;
    out_ptr += 4;
    /* Top row: 1x2 horizontal max windows. */
    for (int i = 0; i < loopw; i++)
    {
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        max0 = _mm_max_ps(line00, line01);
        _mm_storeu_ps(out_ptr, max0);
        out_ptr += 4;
        line0 += 8;
    }
    if (inw % 2 == 0)
    {
        line00 = _mm_loadu_ps(line0);
        _mm_storeu_ps(out_ptr, line00);
        out_ptr += 4;
    }
    line0 += remain_w * 4;
    line1 = line0 + inw * 4;
    /* Middle rows: left-edge 2x1 window, then full 2x2 windows. */
    for (int i = 0; i < looph; i++)
    {
        line00 = _mm_loadu_ps(line0);
        line10 = _mm_loadu_ps(line1);
        max = _mm_max_ps(line00, line10);
        _mm_storeu_ps(out_ptr, max);
        out_ptr += 4;
        line0 += 4;
        line1 += 4;
        for (int i = 0; i < loopw; i++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            max0 = _mm_max_ps(line00, line01);
            max1 = _mm_max_ps(line10, line11);
            max = _mm_max_ps(max0, max1);
            _mm_storeu_ps(out_ptr, max);
            out_ptr += 4;
            line0 += 8;
            line1 += 8;
        }
        /* Right-edge 2x1 leftover column. */
        if (inw % 2 == 0)
        {
            line00 = _mm_loadu_ps(line0);
            line10 = _mm_loadu_ps(line1);
            max = _mm_max_ps(line00, line10);
            _mm_storeu_ps(out_ptr, max);
            out_ptr += 4;
        }
        line0 += (inw + remain_w) * 4;
        line1 += (inw + remain_w) * 4;
    }
    /* Bottom leftover row when the height is even. */
    if (inh % 2 == 0)
    {
        line00 = _mm_loadu_ps(line0);
        _mm_storeu_ps(out_ptr, line00);
        out_ptr += 4;
        line0 += 4;
        for (int i = 0; i < loopw; i++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            max = _mm_max_ps(line00, line01);
            _mm_storeu_ps(out_ptr, max);
            line0 += 8;
            out_ptr += 4;
        }
        if (inw % 2 == 0)
        {
            line00 = _mm_loadu_ps(line0);
            _mm_storeu_ps(out_ptr, line00);
            out_ptr += 4;
        }
    }
}

/* Average pooling, 2x2 kernel, stride 2, no leading padding.
 * pad_w1/pad_h1 > 0 mark a trailing half-window column/row. */
static void avg_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 2;  /* NOTE(review): unused */
    int remain_w = inw - outw * 2;
    const float* line0 = input;
    const float* line1 = input + inw * 4;
    float* out_ptr = output;
    __m128 scalar_025 = _mm_set1_ps(0.25f);
    __m128 scalar_05 =
        _mm_set1_ps(0.5f);
    __m128 line00;
    __m128 line01;
    __m128 line10;
    __m128 line11;
    __m128 add0;
    __m128 add1;
    __m128 add;
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            add0 = _mm_add_ps(line00, line01);
            add1 = _mm_add_ps(line10, line11);
            add = _mm_add_ps(add0, add1);
            add = _mm_mul_ps(add, scalar_025);
            _mm_storeu_ps(out_ptr, add);
            line0 += 8;
            line1 += 8;
            out_ptr += 4;
        }
        /* Trailing half-window column (2x1, divide by 2).
         * NOTE(review): line00/line10 still hold the loads from the LAST
         * loop iteration (line0 - 8), not a fresh load at line0 — looks
         * like it should reload here, as max_3x3s2 does; and if outw was
         * 0 they are uninitialized. Verify against a scalar reference. */
        if (pad_w1 > 0)
        {
            add = _mm_add_ps(line00, line10);
            add = _mm_mul_ps(add, scalar_05);
            _mm_storeu_ps(out_ptr, add);
        }
        line0 += (inw + remain_w) * 4;
        line1 += (inw + remain_w) * 4;
    }
    /* Trailing half-window row (1x2 windows, divide by 2). */
    if (pad_h1 > 0)
    {
        for (int j = 0; j < outw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            add0 = _mm_add_ps(line00, line01);
            add0 = _mm_mul_ps(add0, scalar_05);
            _mm_storeu_ps(out_ptr, add0);
            line0 += 8;
            out_ptr += 4;
        }
        if (pad_w1 > 0)
        {
            line00 = _mm_loadu_ps(line0);
            _mm_storeu_ps(out_ptr, line00);
        }
    }
}

/* Max pooling, 2x2 kernel, stride 2, no leading padding. */
static void max_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;    /* NOTE(review): unused */
    int out_hw = outh * outw; /* NOTE(review): unused */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 2;  /* NOTE(review): unused */
    int remain_w = inw - outw * 2;
    const float* line0 = input;
    const float* line1 = input + inw * 4;
    float* out_ptr = output;
    __m128 line00;
    __m128 line01;
    __m128 line10;
    __m128 line11;
    __m128 max0;
    __m128 max1;
    __m128 max;
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            max0 = _mm_max_ps(line00, line01);
            max1 = _mm_max_ps(line10, line11);
            max = _mm_max_ps(max0, max1);
            _mm_storeu_ps(out_ptr, max);
            line0 += 8;
            line1 += 8;
            out_ptr += 4;
        }
        /* Trailing column. NOTE(review): same stale line00/line10 reuse
         * as in avg_2x2s2 above — confirm intended. */
        if (pad_w1 > 0)
        {
            max = _mm_max_ps(line00, line10);
            _mm_storeu_ps(out_ptr, max);
        }
        line0 += (inw + remain_w) * 4;
        line1 += (inw + remain_w) * 4;
    }
    /* Trailing row: 1x2 horizontal max windows. */
    if (pad_h1 > 0)
    {
        for (int j = 0; j < outw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            max0 = _mm_max_ps(line00, line01);
            _mm_storeu_ps(out_ptr, max0);
            line0 += 8;
            out_ptr += 4;
        }
        if (pad_w1 > 0)
        {
            line00 = _mm_loadu_ps(line0);
            _mm_storeu_ps(out_ptr, line00);
        }
    }
}

/* Max pooling, 3x3 kernel, stride 1, 1-pixel padding on all sides
 * (outh == inh, outw == inw). The commented-out scalar loops are the
 * ARM reference implementation this SSE version was ported from. */
static void max_3x3s1_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int mid_h = inh - 2;
    int mid_w = inw - 2;
    const float* line1 = input;
    const float* line2 = input + inw * 4;
    float* out_ptr = output;
    /* First row, left corner: only a 2x2 real neighborhood exists. */
    __m128 line10 = _mm_loadu_ps(line1);
    __m128 line11 = _mm_loadu_ps(line1 + 4);
    __m128 line20 = _mm_loadu_ps(line2);
    __m128 line21 = _mm_loadu_ps(line2 + 4);
    __m128 max1 = _mm_max_ps(line10, line20);
    __m128 max2 = _mm_max_ps(line11, line21);
    __m128 max12 = _mm_max_ps(max1, max2);
    _mm_storeu_ps(out_ptr, max12);
    out_ptr += 4;
    // h begin center----[line1+=1]----------------------------------
    // for (int j = 0; j < mid_w; j++)
    // {
    //     float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
    //     float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
    //     *out_ptr = arm64_max(max2, max1);
    //     out_ptr++;
    //     line1 += 1;
    //     line2 += 1;
    // }
    __m128 line12;
    __m128 line22;
    __m128 max;
    /* First row, center: 2x3 real neighborhood (top row is padding). */
    for (int j = 0; j < mid_w; j++)
    {
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        max12 = _mm_max_ps(_mm_max_ps(line10, line20), _mm_max_ps(line11, line21));
        line12 = _mm_loadu_ps(line1 + 8);
        line22 = _mm_loadu_ps(line2 + 8);
        max = _mm_max_ps(line12, line22);
        _mm_storeu_ps(out_ptr, _mm_max_ps(max12, max));
        out_ptr += 4;
        line1 += 4;
        line2 += 4;
    }
    // h begin right----[line1+=2]-----------------------------------
    // *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
    // out_ptr++;
    // line1 += 2;
    // line2 += 2;
    /* First row, right corner: 2x2 real neighborhood. */
    line10 = _mm_loadu_ps(line1);
    line11 = _mm_loadu_ps(line1 + 4);
    line20 = _mm_loadu_ps(line2);
    line21 = _mm_loadu_ps(line2 + 4);
    max12 = _mm_max_ps(_mm_max_ps(line10, line20), _mm_max_ps(line11, line21));
    _mm_storeu_ps(out_ptr, max12);
    out_ptr += 4;
    line1 += 8;
    line2 += 8;
    // const float* line0 = input + c * in_hw;
    // for (int i = 0; i < mid_h; i++)
    // {
    //     // left
    //     float max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
    //     *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
    //     out_ptr++;
    //     // mid
    //     for (int j = 0; j < mid_w; j++)
    //     {
    //         float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
    //         float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
    //         float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
    //         *out_ptr = arm64_max(arm64_max(max0, max1), max2);
    //         out_ptr++;
    //         line0 += 1;
    //         line1 += 1;
    //         line2 += 1;
    //     }
    //     max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
    //     *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
    //     out_ptr++;
    //     line0 += 2;
    //     line1 += 2;
    //     line2 += 2;
    // }
    /* Middle rows: full 3-line traversal (line0 above, line1 center, line2 below). */
    const float* line0 = input;
    __m128 max0;
    __m128 line00;
    __m128 line01;
    __m128 line02;
    for (int i = 0; i < mid_h; i++)
    {
        /* Left edge: 3x2 real neighborhood. */
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        max1 = _mm_max_ps(line10, line11);
        max2 = _mm_max_ps(line20, line21);
        max0 = _mm_max_ps(line00, line01);
        max = _mm_max_ps(_mm_max_ps(max0, max1), max2);
        _mm_storeu_ps(out_ptr, max);
        out_ptr += 4;
        for (int j = 0; j < mid_w; j++)
        {
            /* code */
            // for (int j = 0; j < mid_w; j++)
            // {
            //     float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
            //     float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
            //     float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
            //     *out_ptr = arm64_max(arm64_max(max0, max1), max2);
            //     out_ptr++;
            //     line0 += 1;
            //     line1 += 1;
            //     line2 += 1;
            // }
            // max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            // *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
            // out_ptr++;
            // line0 += 2;
            // line1 += 2;
            // line2 += 2;
            /* Center: full 3x3 neighborhood. */
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line12 = _mm_loadu_ps(line1 + 8);
            line20 = _mm_loadu_ps(line2);
            line21 = _mm_loadu_ps(line2 + 4);
            line22 = _mm_loadu_ps(line2 + 8);
            max0 = _mm_max_ps(_mm_max_ps(line00, line01), line02);
            max1 = _mm_max_ps(_mm_max_ps(line10, line11), line12);
            max2 = _mm_max_ps(_mm_max_ps(line20, line21), line22);
            _mm_storeu_ps(out_ptr, _mm_max_ps(_mm_max_ps(max0, max1), max2));
            out_ptr += 4;
            line0 += 4;
            line1 += 4;
            line2 += 4;
        }
        /* Right edge: 3x2 real neighborhood. */
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        max0 = _mm_max_ps(line00, line01);
        max1 = _mm_max_ps(line10, line11);
        max2 = _mm_max_ps(line20, line21);
        _mm_storeu_ps(out_ptr, _mm_max_ps(_mm_max_ps(max0, max1), max2));
        out_ptr += 4;
        line0 += 8;
        line1 += 8;
        line2 += 8;
    }
    // *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
    // out_ptr++;
    // for (int j = 0; j < mid_w; j++)
    // {
    //     float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
    //     float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
    //     *out_ptr = arm64_max(max0, max1);
    //     out_ptr++;
    //     line0 += 1;
    //     line1 += 1;
    // }
    // *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
    /* Last row: 2-line neighborhoods (bottom row is padding). */
    line00 = _mm_loadu_ps(line0);
    line01 = _mm_loadu_ps(line0 + 4);
    line10 = _mm_loadu_ps(line1);
    line11 = _mm_loadu_ps(line1 + 4);
    max0 = _mm_max_ps(line00, line01);
    max1 = _mm_max_ps(line10, line11);
    _mm_storeu_ps(out_ptr, _mm_max_ps(max0, max1));
    out_ptr += 4;
    for (int i = 0; i < mid_w; i++)
    {
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        line02 = _mm_loadu_ps(line0 + 8);
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line12 = _mm_loadu_ps(line1 + 8);
        max0 = _mm_max_ps(line00, line01);
        max1 = _mm_max_ps(line10, line11);
        max2 = _mm_max_ps(line02, line12);
        _mm_storeu_ps(out_ptr, _mm_max_ps(_mm_max_ps(max0, max1), max2));
        out_ptr += 4;
        line0 += 4;
        line1 += 4;
    }
    line00 = _mm_loadu_ps(line0);
    line01 = _mm_loadu_ps(line0 + 4);
    line10 = _mm_loadu_ps(line1);
    line11 = _mm_loadu_ps(line1 + 4);
    max0 = _mm_max_ps(line00, line01);
    max1 = _mm_max_ps(line10, line11);
    _mm_storeu_ps(out_ptr, _mm_max_ps(max0, max1));
}

/* Max pooling, 3x3 kernel, stride 2, no leading padding.
 * pad_w1/pad_h1 == 1 mark a trailing 2-wide column / 2-tall row. */
static void max_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
    const float* line0 = input;
    const float* line1 = input + inw * 4;
    const float* line2 = input + inw * 8;
    float* out_ptr = output;
    __m128 line00;
    __m128 line01;
    __m128 line02;
    __m128 line10;
    __m128 line11;
    __m128 line12;
    __m128 line20;
    __m128 line21;
    __m128 line22;
    __m128 max0;
    __m128 max1;
    __m128 max2;
    __m128 max;
    int remain_w = inw - 2 * outw;
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line12 = _mm_loadu_ps(line1 + 8);
            line20 = _mm_loadu_ps(line2);
            line21 = _mm_loadu_ps(line2 + 4);
            line22 = _mm_loadu_ps(line2 + 8);
            max0 = _mm_max_ps(_mm_max_ps(line00, line01), line02);
            max1 = _mm_max_ps(_mm_max_ps(line10, line11), line12);
            max2 = _mm_max_ps(_mm_max_ps(line20, line21), line22);
            _mm_storeu_ps(out_ptr, _mm_max_ps(_mm_max_ps(max0, max1), max2));
            line0 += 8;
            line1 += 8;
            line2 += 8;
            out_ptr += 4;
        }
        /* Trailing 3x2 window. */
        if (pad_w1 == 1)
        {
            line00 =
                     _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line20 = _mm_loadu_ps(line2);
            line21 = _mm_loadu_ps(line2 + 4);
            max0 = _mm_max_ps(line00, line01);
            max1 = _mm_max_ps(line10, line11);
            max2 = _mm_max_ps(line20, line21);
            _mm_storeu_ps(out_ptr, _mm_max_ps(_mm_max_ps(max0, max1), max2));
            out_ptr += 4;
        }
        line0 += (remain_w + inw) * 4;
        line1 += (remain_w + inw) * 4;
        line2 += (remain_w + inw) * 4;
    }
    /* Trailing 2-tall row. */
    if (pad_h1 == 1)
    {
        for (int j = 0; j < outw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line12 = _mm_loadu_ps(line1 + 8);
            max0 = _mm_max_ps(_mm_max_ps(line00, line01), line02);
            max1 = _mm_max_ps(_mm_max_ps(line10, line11), line12);
            _mm_storeu_ps(out_ptr, _mm_max_ps(max0, max1));
            line0 += 8;
            line1 += 8;
            line2 += 8;
            out_ptr += 4;
        }
        /* Trailing 2x2 corner. */
        if (pad_w1 == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            max0 = _mm_max_ps(line00, line01);
            max1 = _mm_max_ps(line10, line11);
            _mm_storeu_ps(out_ptr, _mm_max_ps(max0, max1));
        }
    }
}

/* Average pooling, 3x3 kernel, stride 2, 1-pixel leading padding.
 * is_caffe selects border divisor convention (count-real-pixels vs 1/9). */
static void avg_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int loopw = (inw - 2) >> 1;
    /* NOTE(review): looph is taken BEFORE outh is decremented below,
     * whereas max_3x3s2_p1 decrements first — confirm this asymmetry
     * is intentional against a scalar reference. */
    int looph = outh - 1;
    if (is_caffe == 1 || inw % 2 == 1)
    {
        outw--;
    }
    if (is_caffe == 1 || inh % 2 == 1)
        outh--;
    int remain_w = inw - loopw * 2 + 1;
    if (is_caffe == 1)
    {
        remain_w = 1;
    }
    __m128 scalar_011 = _mm_set1_ps(0.11111111f);  /* 1/9 */
    __m128 scalar_016 = _mm_set1_ps(0.16666667f);  /* 1/6 */
    __m128 scalar_033 = _mm_set1_ps(0.3333333f);   /* 1/3 (unused here) */
    __m128 scalar_025 = _mm_set1_ps(0.25f);        /* 1/4 */
    const float* line1 = input;
    const float* line2 = input + inw * 4;
    float* out_ptr = output;
    /* Top-left corner: 2x2 real pixels. */
    __m128 line10 = _mm_loadu_ps(line1);
    __m128 line11 = _mm_loadu_ps(line1 + 4);
    __m128 line20 = _mm_loadu_ps(line2);
    __m128 line21 = _mm_loadu_ps(line2 + 4);
    __m128 sum1 = _mm_add_ps(line10, line11);
    __m128 sum2 = _mm_add_ps(line20, line21);
    __m128 sum = _mm_add_ps(sum1, sum2);
    if (is_caffe == 0)
    {
        sum = _mm_mul_ps(sum, scalar_025);
    }
    else
    {
        sum = _mm_mul_ps(sum, scalar_011);
    }
    _mm_storeu_ps(out_ptr, sum);
    line1 += 4;
    line2 += 4;
    out_ptr += 4;
    __m128 line12;
    __m128 line22;
    /* Top row: 2x3 windows. */
    for (int j = 0; j < loopw; j++)
    {
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line12 = _mm_loadu_ps(line1 + 8);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        line22 = _mm_loadu_ps(line2 + 8);
        sum1 = _mm_add_ps(line10, _mm_add_ps(line11, line12));
        sum2 = _mm_add_ps(line20, _mm_add_ps(line21, line22));
        sum = _mm_add_ps(sum1, sum2);
        if (is_caffe == 0)
        {
            sum = _mm_mul_ps(sum, scalar_016);
        }
        else
        {
            sum = _mm_mul_ps(sum, scalar_011);
        }
        _mm_storeu_ps(out_ptr, sum);
        line1 += 8;
        line2 += 8;
        out_ptr += 4;
    }
    /* Top-right corner: 2x2 window when the width is odd. */
    if (inw % 2 == 1)
    {
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        sum1 = _mm_add_ps(line10, line11);
        sum2 = _mm_add_ps(line20, line21);
        sum = _mm_add_ps(sum1, sum2);
        if (is_caffe == 0)
        {
            sum = _mm_mul_ps(sum, scalar_025);
        }
        else
        {
            sum = _mm_mul_ps(sum, scalar_011);
        }
        _mm_storeu_ps(out_ptr, sum);
        out_ptr += 4;
    }
    else if (inw % 2 == 0 && is_caffe == 1)
    {
        // line10 = _mm_loadu_ps(line1);
        // line20 = _mm_loadu_ps(line2);
        // sum = _mm_add_ps(line10, line20);
        // sum = _mm_mul_ps(sum, scalar_016);
        // _mm_storeu_ps(out_ptr, sum);
        // out_ptr += 4;
    }
    line1 += remain_w * 4;
    line2 += remain_w * 4;
    /* Shift the 3-line window down for the middle rows. */
    const float* line0 = line1;
    line1 = line2;
    line2 = line1 + inw * 4;
    __m128 line00;
    __m128 line01;
    __m128 line02;
    __m128 sum0;
    for (int i = 0; i < looph; i++)
    {
        /* Left edge: 3x2 window. */
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        sum0 = _mm_add_ps(line00, line01);
        sum1 = _mm_add_ps(line10, line11);
        sum2 = _mm_add_ps(line20, line21);
        sum = _mm_add_ps(_mm_add_ps(sum0, sum1), sum2);
        if (is_caffe == 0)
        {
            sum = _mm_mul_ps(sum, scalar_016);
        }
        else
        {
            sum = _mm_mul_ps(sum, scalar_011);
        }
        _mm_storeu_ps(out_ptr, sum);
        line0 += 4;
        line1 += 4;
        line2 += 4;
        out_ptr += 4;
        /* Center: full 3x3 windows, always divided by 9. */
        for (int j = 0; j < loopw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line12 = _mm_loadu_ps(line1 + 8);
            line20 = _mm_loadu_ps(line2);
            line21 = _mm_loadu_ps(line2 + 4);
            line22 = _mm_loadu_ps(line2 + 8);
            sum0 = _mm_add_ps(line00, _mm_add_ps(line01, line02));
            sum1 = _mm_add_ps(line10, _mm_add_ps(line11, line12));
            sum2 = _mm_add_ps(line20, _mm_add_ps(line21, line22));
            sum = _mm_add_ps(sum0, _mm_add_ps(sum1, sum2));
            sum = _mm_mul_ps(sum, scalar_011);
            _mm_storeu_ps(out_ptr, sum);
            out_ptr += 4;
            line0 += 8;
            line1 += 8;
            line2 += 8;
        }
        /* Right edge: 3x2 window when the width is odd. */
        if (inw % 2 == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line20 = _mm_loadu_ps(line2);
            line21 = _mm_loadu_ps(line2 + 4);
            sum1 = _mm_add_ps(line10, line11);
            sum2 = _mm_add_ps(line20, line21);
            sum0 = _mm_add_ps(line00, line01);
            sum = _mm_add_ps(sum0, _mm_add_ps(sum1, sum2));
            if (is_caffe == 0)
            {
                sum = _mm_mul_ps(sum, scalar_016);
            }
            else
            {
                sum = _mm_mul_ps(sum, scalar_011);
            }
            _mm_storeu_ps(out_ptr, sum);
            out_ptr += 4;
        }
        else if (inw % 2 == 0 && is_caffe == 1)
        {
            // line00 = _mm_loadu_ps(line0);
            // line10 = _mm_loadu_ps(line1);
            // line20 = _mm_loadu_ps(line2);
            // sum = _mm_add_ps(line00, _mm_add_ps(line10, line20));
            // sum = _mm_mul_ps(sum, scalar_016);
            // _mm_storeu_ps(out_ptr, sum);
            // out_ptr += 4;
        }
        line0 += (inw + remain_w) * 4;
        line1 += (inw + remain_w) * 4;
        line2 += (inw + remain_w) * 4;
    }
    /* Bottom row: 2-line windows when the height is odd. */
    if (inh % 2 == 1)
    {
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        sum1 = _mm_add_ps(line10, line11);
        sum0 = _mm_add_ps(line00, line01);
        sum = _mm_add_ps(sum0, sum1);
        if (is_caffe == 0)
        {
            sum = _mm_mul_ps(sum, scalar_025);
        }
        else
        {
            sum = _mm_mul_ps(sum, scalar_011);
        }
        _mm_storeu_ps(out_ptr, sum);
        out_ptr += 4;
        line0 += 4;
        line1 += 4;
        for (int j = 0; j < loopw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line12 = _mm_loadu_ps(line1 + 8);
            sum0 = _mm_add_ps(line00, _mm_add_ps(line01, line02));
            sum1 = _mm_add_ps(line10, _mm_add_ps(line11, line12));
            sum = _mm_add_ps(sum0, sum1);
            if (is_caffe == 0)
            {
                sum = _mm_mul_ps(sum, scalar_016);
            }
            else
            {
                sum = _mm_mul_ps(sum, scalar_011);
            }
            _mm_storeu_ps(out_ptr, sum);
            line0 += 8;
            line1 += 8;
            out_ptr += 4;
        }
        if (inw % 2 == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            sum0 = _mm_add_ps(line00, line01);
            sum1 = _mm_add_ps(line10, line11);
            sum = _mm_add_ps(sum0, sum1);
            if (is_caffe == 0)
            {
                sum = _mm_mul_ps(sum, scalar_025);
            }
            else
            {
                sum = _mm_mul_ps(sum, scalar_011);
            }
            _mm_storeu_ps(out_ptr, sum);
            out_ptr += 4;
        }
        else if (inw % 2 == 0 && is_caffe == 1)
        {
            // line00 = _mm_loadu_ps(line0);
            // line10 = _mm_loadu_ps(line1);
            // sum = _mm_add_ps(line00, line10);
            // sum = _mm_mul_ps(sum, scalar_016);
            // _mm_storeu_ps(out_ptr, sum);
            // out_ptr += 4;
        }
    }
    /* Caffe-mode extra bottom row when the height is even. */
    else if (inh % 2 == 0 && is_caffe == 1)
    {
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        sum = _mm_add_ps(line00, line01);
        sum = _mm_mul_ps(sum, scalar_016);
        _mm_storeu_ps(out_ptr, sum);
        line0 += 4;
        out_ptr += 4;
        for (int j = 0; j < loopw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            sum = _mm_add_ps(line00, _mm_add_ps(line01, line02));
            sum = _mm_mul_ps(sum, scalar_016);
            _mm_storeu_ps(out_ptr, sum);
            line0 += 8;
            out_ptr += 4;
        }
        if (inw % 2 == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            sum = _mm_add_ps(line00, line01);
            sum = _mm_mul_ps(sum, scalar_016);
            _mm_storeu_ps(out_ptr, sum);
        }
        else if (inw % 2 == 0)
        {
            // sum = _mm_loadu_ps(line0);
            // sum = _mm_mul_ps(sum, scalar_025);
            // _mm_storeu_ps(out_ptr, sum);
        }
    }
}

/* Max pooling, 3x3 kernel, stride 2, 1-pixel leading padding.
 * Same traversal as avg_3x3s2_p1; max over whatever real pixels the
 * (possibly clipped) window covers, so no divisor handling. */
static void max_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    if (is_caffe == 1 || inw % 2 == 1)
    {
        outw--;
    }
    if (is_caffe == 1 || inh % 2 == 1)
        outh--;
    int loopw = outw - 1;  /* note: computed AFTER the decrements, unlike avg_3x3s2_p1 */
    int looph = outh - 1;
    int remain_w = inw - outw * 2 + 1;
    const float* line1 = input;
    const float* line2 = input + inw * 4;
    float* out_ptr = output;
    /* Top-left corner: 2x2 real pixels. */
    __m128 line10 = _mm_loadu_ps(line1);
    __m128 line11 = _mm_loadu_ps(line1 + 4);
    __m128 line20 = _mm_loadu_ps(line2);
    __m128 line21 = _mm_loadu_ps(line2 + 4);
    __m128 max1 = _mm_max_ps(line10, line11);
    __m128 max2 = _mm_max_ps(line20, line21);
    __m128 max = _mm_max_ps(max1, max2);
    _mm_storeu_ps(out_ptr, max);
    line1 += 4;
    line2 += 4;
    out_ptr += 4;
    __m128 line12;
    __m128 line22;
    /* Top row: 2x3 windows. */
    for (int j = 0; j < loopw; j++)
    {
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line12 = _mm_loadu_ps(line1 + 8);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        line22 = _mm_loadu_ps(line2 + 8);
        max1 = _mm_max_ps(line10, _mm_max_ps(line11, line12));
        max2 = _mm_max_ps(line20, _mm_max_ps(line21, line22));
        max = _mm_max_ps(max1, max2);
        _mm_storeu_ps(out_ptr, max);
        line1 += 8;
        line2 += 8;
        out_ptr += 4;
    }
    /* Top-right corner: 2x2 (odd width) or 2x1 (even width, caffe). */
    if (inw % 2 == 1)
    {
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        max1 = _mm_max_ps(line10, line11);
        max2 = _mm_max_ps(line20, line21);
        max = _mm_max_ps(max1, max2);
        _mm_storeu_ps(out_ptr, max);
        out_ptr += 4;
    }
    else if (inw % 2 == 0 && is_caffe == 1)
    {
        line10 = _mm_loadu_ps(line1);
        line20 = _mm_loadu_ps(line2);
        _mm_storeu_ps(out_ptr, _mm_max_ps(line10, line20));
        out_ptr += 4;
    }
    line1 += remain_w * 4;
    line2 += remain_w * 4;
    /* Shift the 3-line window down for the middle rows. */
    const float* line0 = line1;
    line1 = line2;
    line2 = line1 + inw * 4;
    __m128 line00;
    __m128 line01;
    __m128 line02;
    __m128 max0;
    for (int i = 0; i < looph; i++)
    {
        /* Left edge: 3x2 window. */
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line20 = _mm_loadu_ps(line2);
        line21 = _mm_loadu_ps(line2 + 4);
        max0 = _mm_max_ps(line00, line01);
        max1 = _mm_max_ps(line10, line11);
        max2 = _mm_max_ps(line20, line21);
        max = _mm_max_ps(_mm_max_ps(max0, max1), max2);
        _mm_storeu_ps(out_ptr, max);
        line0 += 4;
        line1 += 4;
        line2 += 4;
        out_ptr += 4;
        /* Center: full 3x3 windows. */
        for (int j = 0; j < loopw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line12 = _mm_loadu_ps(line1 + 8);
            line20 = _mm_loadu_ps(line2);
            line21 = _mm_loadu_ps(line2 + 4);
            line22 = _mm_loadu_ps(line2 + 8);
            max0 = _mm_max_ps(line00, _mm_max_ps(line01, line02));
            max1 = _mm_max_ps(line10, _mm_max_ps(line11, line12));
            max2 = _mm_max_ps(line20, _mm_max_ps(line21, line22));
            max = _mm_max_ps(max0, _mm_max_ps(max1, max2));
            _mm_storeu_ps(out_ptr, max);
            out_ptr += 4;
            line0 += 8;
            line1 += 8;
            line2 += 8;
        }
        /* Right edge. */
        if (inw % 2 == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line20 = _mm_loadu_ps(line2);
            line21 = _mm_loadu_ps(line2 + 4);
            max1 = _mm_max_ps(line10, line11);
            max2 = _mm_max_ps(line20, line21);
            max0 = _mm_max_ps(line00, line01);
            max = _mm_max_ps(max0, _mm_max_ps(max1, max2));
            _mm_storeu_ps(out_ptr, max);
            out_ptr += 4;
        }
        else if (inw % 2 == 0 && is_caffe == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line10 = _mm_loadu_ps(line1);
            line20 = _mm_loadu_ps(line2);
            max = _mm_max_ps(line00, _mm_max_ps(line10, line20));
            _mm_storeu_ps(out_ptr, max);
            out_ptr += 4;
        }
        line0 += (inw + remain_w) * 4;
        line1 += (inw + remain_w) * 4;
        line2 += (inw + remain_w) * 4;
    }
    /* Bottom row: 2-line windows when the height is odd. */
    if (inh % 2 == 1)
    {
        line10 = _mm_loadu_ps(line1);
        line11 = _mm_loadu_ps(line1 + 4);
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        max1 = _mm_max_ps(line10, line11);
        max0 = _mm_max_ps(line00, line01);
        max = _mm_max_ps(max0, max1);
        _mm_storeu_ps(out_ptr, max);
        out_ptr += 4;
        line0 += 4;
        line1 += 4;
        for (int j = 0; j < loopw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            line12 = _mm_loadu_ps(line1 + 8);
            max0 = _mm_max_ps(line00, _mm_max_ps(line01, line02));
            max1 = _mm_max_ps(line10, _mm_max_ps(line11, line12));
            max = _mm_max_ps(max0, max1);
            _mm_storeu_ps(out_ptr, max);
            line0 += 8;
            line1 += 8;
            out_ptr += 4;
        }
        if (inw % 2 == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line10 = _mm_loadu_ps(line1);
            line11 = _mm_loadu_ps(line1 + 4);
            max0 = _mm_max_ps(line00, line01);
            max1 = _mm_max_ps(line10, line11);
            max = _mm_max_ps(max0, max1);
            _mm_storeu_ps(out_ptr, max);
            out_ptr += 4;
        }
        else if (inw % 2 == 0 && is_caffe == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line10 = _mm_loadu_ps(line1);
            max = _mm_max_ps(line00, line10);
            _mm_storeu_ps(out_ptr, max);
            out_ptr += 4;
        }
    }
    /* Caffe-mode extra bottom row when the height is even. */
    else if (inh % 2 == 0 && is_caffe == 1)
    {
        line00 = _mm_loadu_ps(line0);
        line01 = _mm_loadu_ps(line0 + 4);
        max = _mm_max_ps(line00, line01);
        _mm_storeu_ps(out_ptr, max);
        line0 += 4;
        out_ptr += 4;
        for (int j = 0; j < loopw; j++)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            line02 = _mm_loadu_ps(line0 + 8);
            max = _mm_max_ps(line00, _mm_max_ps(line01, line02));
            _mm_storeu_ps(out_ptr, max);
            line0 += 8;
            out_ptr += 4;
        }
        if (inw % 2 == 1)
        {
            line00 = _mm_loadu_ps(line0);
            line01 = _mm_loadu_ps(line0 + 4);
            max = _mm_max_ps(line00, line01);
            _mm_storeu_ps(out_ptr, max);
        }
        else if (inw % 2 == 0)
        {
            max = _mm_loadu_ps(line0);
            _mm_storeu_ps(out_ptr, max);
        }
    }
}

/* Average pooling, 3x3 kernel, stride 2, no leading padding.
 * pad_w1/pad_h1 may be 1 (2-wide trailing window) or 2 (1-wide). */
static void avg_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w,
int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { if (pad_w1 > 0) { outw--; } if (pad_h1 > 0) { outh--; } const float* line0 = input; const float* line1 = input + inw * 4; const float* line2 = input + inw * 8; float* out_ptr = output; __m128 scalar_011 = _mm_set1_ps(0.11111111f); __m128 scalar_016 = _mm_set1_ps(0.16666667f); __m128 scalar_025 = _mm_set1_ps(0.25f); __m128 scalar_05 = _mm_set1_ps(0.5f); __m128 scalar_033 = _mm_set1_ps(0.33333333f); __m128 line00; __m128 line01; __m128 line02; __m128 line10; __m128 line11; __m128 line12; __m128 line20; __m128 line21; __m128 line22; __m128 sum0; __m128 sum1; __m128 sum2; __m128 sum; int remain_w = inw - 2 * outw; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { line00 = _mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); line02 = _mm_loadu_ps(line0 + 8); line10 = _mm_loadu_ps(line1); line11 = _mm_loadu_ps(line1 + 4); line12 = _mm_loadu_ps(line1 + 8); line20 = _mm_loadu_ps(line2); line21 = _mm_loadu_ps(line2 + 4); line22 = _mm_loadu_ps(line2 + 8); sum0 = _mm_add_ps(_mm_add_ps(line00, line01), line02); sum1 = _mm_add_ps(_mm_add_ps(line10, line11), line12); sum2 = _mm_add_ps(_mm_add_ps(line20, line21), line22); sum = _mm_add_ps(_mm_add_ps(sum0, sum1), sum2); sum = _mm_mul_ps(sum, scalar_011); _mm_storeu_ps(out_ptr, sum); line0 += 8; line1 += 8; line2 += 8; out_ptr += 4; } if (pad_w1 == 1) { line00 = _mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); line10 = _mm_loadu_ps(line1); line11 = _mm_loadu_ps(line1 + 4); line20 = _mm_loadu_ps(line2); line21 = _mm_loadu_ps(line2 + 4); sum0 = _mm_add_ps(line00, line01); sum1 = _mm_add_ps(line10, line11); sum2 = _mm_add_ps(line20, line21); sum = _mm_add_ps(_mm_add_ps(sum0, sum1), sum2); sum = _mm_mul_ps(sum, scalar_016); _mm_storeu_ps(out_ptr, sum); out_ptr += 4; } line0 += (remain_w + inw) * 4; line1 += (remain_w + inw) * 4; line2 += (remain_w + inw) * 4; } if (pad_h1 == 1) { for (int j = 0; j < outw; j++) { line00 = 
_mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); line02 = _mm_loadu_ps(line0 + 8); line10 = _mm_loadu_ps(line1); line11 = _mm_loadu_ps(line1 + 4); line12 = _mm_loadu_ps(line1 + 8); sum0 = _mm_add_ps(_mm_add_ps(line00, line01), line02); sum1 = _mm_add_ps(_mm_add_ps(line10, line11), line12); sum = _mm_add_ps(sum0, sum1); sum = _mm_mul_ps(sum, scalar_016); _mm_storeu_ps(out_ptr, sum); line0 += 8; line1 += 8; out_ptr += 4; } if (pad_w1 == 1) { line00 = _mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); line10 = _mm_loadu_ps(line1); line11 = _mm_loadu_ps(line1 + 4); sum0 = _mm_add_ps(line00, line01); sum1 = _mm_add_ps(line10, line11); sum = _mm_add_ps(sum0, sum1); sum = _mm_mul_ps(sum, scalar_025); _mm_storeu_ps(out_ptr, sum); } else if (pad_w1 == 2) { line00 = _mm_loadu_ps(line0); line10 = _mm_loadu_ps(line1); sum = _mm_add_ps(line00, line10); sum = _mm_mul_ps(sum, scalar_05); _mm_storeu_ps(out_ptr, sum); } } else if (pad_h1 == 2) { for (int j = 0; j < outw; j++) { line00 = _mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); line02 = _mm_loadu_ps(line0 + 8); sum0 = _mm_add_ps(_mm_add_ps(line00, line01), line02); sum0 = _mm_mul_ps(sum0, scalar_033); _mm_storeu_ps(out_ptr, sum0); line0 += 8; out_ptr += 4; } if (pad_w1 == 1) { line00 = _mm_loadu_ps(line0); line01 = _mm_loadu_ps(line0 + 4); sum0 = _mm_add_ps(line00, line01); sum0 = _mm_mul_ps(sum0, scalar_05); _mm_storeu_ps(out_ptr, sum0); } else if (pad_w1 == 2) { line00 = _mm_loadu_ps(line0); _mm_storeu_ps(out_ptr, line00); } } } static void avg_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { int in_hw = inw * inh; int block = in_hw >> 3; int tail = in_hw & ~7; for (int c = 0; c < inc; c++) { const float* line0 = input + c * in_hw; float* out_ptr = output + c; float sum = 0.f; for (int j = 0; j < block; j++) { __m128 p00 = _mm_loadu_ps(line0); __m128 p01 = 
_mm_loadu_ps(line0 + 4); p00 = _mm_add_ps(p00, p01); sum += (p00[0] + p00[1] + p00[2] + p00[3]); line0 += 8; } for (int j = tail; j < in_hw; j++) { sum += line0[0]; line0++; } *out_ptr = sum / in_hw; } } static void max_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { int in_hw = inw * inh; int block = in_hw >> 3; int tail = in_hw & ~7; for (int c = 0; c < inc; c++) { const float* line0 = input + c * in_hw; float* out_ptr = output + c; __m128 p00 = _mm_loadu_ps(line0); __m128 res = p00; for (int j = 0; j < block; j++) { __m128 p00 = _mm_loadu_ps(line0); __m128 p01 = _mm_loadu_ps(line0 + 4); __m128 max0 = _mm_max_ps(p00, p01); res = _mm_max_ps(res, max0); line0 += 8; } float max_ = max(max(res[0], res[1]), max(res[2], res[3])); for (int j = tail; j < in_hw; j++) { max_ = max(max_, line0[0]); line0++; } *out_ptr = max_; } } int pooling_kernel_perf_prerun(struct ir_tensor* input, struct ir_tensor* out, struct pool_param* param) { int pool_size = POOL_GENERIC; /* global pooling */ if (param->global) { if (param->pool_method == POOL_AVG) param->funct = ( pooling_kernel_t )avg_global; else if (param->pool_method == POOL_MAX) param->funct = ( pooling_kernel_t )max_global; assert(param->funct != NULL); return 0; } /* general pooling */ if (param->stride_h == 2 && param->stride_w == 2) { if (param->kernel_h == 2 && param->kernel_w == 2) pool_size = POOL_K2S2; else if (param->kernel_h == 3 && param->kernel_w == 3) pool_size = POOL_K3S2; } else if (param->stride_h == 1 && param->stride_w == 1) { if (param->kernel_h == 3 && param->kernel_w == 3) pool_size = POOL_K3S1; } int pool_method; // 0:max 1:avg int kernel_h; int kernel_w; int stride_h; int stride_w; int pad_h0; int pad_h1; int pad_w0; int pad_w1; int global; // 0:general 1:global int caffe_flavor; /* general max pooling, k2s2, k2k2p1, k3s1p1, k3s2, k3s2p1 */ if (param->pool_method 
== POOL_MAX) { if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1)) { if (param->pad_h0 == 0) { if (pool_size == POOL_K2S2) { param->funct = ( pooling_kernel_t )max_2x2s2; } else if (pool_size == POOL_K3S2) { param->funct = ( pooling_kernel_t )max_3x3s2; } } else if (param->pad_h0 == 1) { if (pool_size == POOL_K2S2) { param->funct = ( pooling_kernel_t )max_2x2s2_p1; } else if (pool_size == POOL_K3S2) { param->funct = ( pooling_kernel_t )max_3x3s2_p1; } else if (pool_size == POOL_K3S1) { param->funct = ( pooling_kernel_t )max_3x3s1_p1; } } } if (param->funct != NULL) return 0; else { fprintf(stderr, "perf general max pooling func not be find\n"); return -1; } } /* general avg pooling, k2s2, k2s2p1, k3s2, k3s2p1 */ if (param->pool_method == POOL_AVG) { if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1)) { if (param->pad_h0 == 0 && param->pad_h1 == 0) { if (pool_size == POOL_K2S2) { param->funct = ( pooling_kernel_t )avg_2x2s2; } else if (pool_size == POOL_K3S2) { param->funct = ( pooling_kernel_t )avg_3x3s2; } } else if (param->pad_h0 == 1 && param->pad_h1 == 1) { if (pool_size == POOL_K2S2) { param->funct = ( pooling_kernel_t )avg_2x2s2_p1; } else if (pool_size == POOL_K3S2) { param->funct = ( pooling_kernel_t )avg_3x3s2_p1; } } } if (param->funct != NULL) return 0; else { fprintf(stderr, "perf general avg pooling func not be find\n"); return -1; } } fprintf(stderr, "perf pooling func not be find\n"); return -1; } #define PACK4 4 static void pack4(float* input, float* input_buffer, int in_h, int in_w) { for (size_t i = 0; i < in_h; i++) { for (int j = 0; j < in_w; j++) { for (int c = 0; c < PACK4; c++) { input_buffer[i * in_w * PACK4 + j * PACK4 + c] = input[c * in_w * in_h + i * in_w + j]; } } } } static void unpack4(float* output_buffer, float* output, int out_h, int out_w) { for (size_t i = 0; i < PACK4; i++) { for (size_t j = 0; j < out_h; j++) { for (size_t k = 0; k < out_w; k++) { output[i * out_h * out_w + j * out_w 
+ k] = output_buffer[j * out_w * PACK4 + k * PACK4 + i]; } } } } int pooling_kernel_perf_run(struct ir_tensor* input, struct ir_tensor* output, struct pool_param* param, int num_thread) { // fprintf(stderr, "perf pooling_kernel_run\n"); int is_caffe = param->caffe_flavor; pooling_kernel_t kernel = (pooling_kernel_t)(param->funct); int batch = input->dims[0]; int c = input->dims[1]; int in_h = input->dims[2]; int in_w = input->dims[3]; int out_h = output->dims[2]; int out_w = output->dims[3]; int img_size = c * in_h * in_w; int feature_size = c * out_h * out_w; if (param->global) { for (int n = 0; n < batch; n++) { void* input_frame = input->data + n * img_size * input->elem_size; void* output_frame = output->data + n * feature_size * output->elem_size; #pragma omp parallel for num_threads(num_thread) for (int ch = 0; ch < c; ch++) { void* cur_input = input_frame + ch * in_h * in_w * input->elem_size; void* cur_output = output_frame + ch * out_h * out_w * output->elem_size; kernel(cur_input, cur_output, 1, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1, is_caffe); } } } else { int packc4 = c >> 2; float* input_buffer = ( float* )calloc(sizeof(float), PACK4 * in_h * in_w); float* output_buffer = ( float* )calloc(sizeof(float), PACK4 * out_h * out_w); for (int n = 0; n < batch; n++) { for (int pck = 0; pck < packc4; pck++) { float* input_cur = ( float* )input->data + n * img_size + pck * PACK4 * in_h * in_w; float* output_cur = ( float* )output->data + n * feature_size + pck * PACK4 * out_h * out_w; pack4(input_cur, input_buffer, in_h, in_w); kernel(input_buffer, output_buffer, c, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1, is_caffe); unpack4(output_buffer, output_cur, out_h, out_w); } } free(input_buffer); free(output_buffer); } return 0; }
ocp_nlp_sqp.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "acados/ocp_nlp/ocp_nlp_sqp.h" // external #include <assert.h> #include <math.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif // blasfeo #include "blasfeo/include/blasfeo_d_aux.h" #include "blasfeo/include/blasfeo_d_aux_ext_dep.h" #include "blasfeo/include/blasfeo_d_blas.h" // acados #include "acados/ocp_nlp/ocp_nlp_common.h" #include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h" #include "acados/ocp_nlp/ocp_nlp_reg_common.h" #include "acados/ocp_qp/ocp_qp_common.h" #include "acados/sim/sim_common.h" #include "acados/utils/math.h" #include "acados/utils/mem.h" #include "acados/utils/print.h" #include "acados/utils/timing.h" #include "acados/utils/types.h" /************************************************ * options ************************************************/ int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; int size = 0; size += sizeof(ocp_nlp_sqp_opts); size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver); size += config->regularize->opts_calculate_size(); // dynamics size += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]); } // cost size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]); } // constraints size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]); } return size; } void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = 
config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; char *c_ptr = (char *) raw_memory; ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_opts); opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr); c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver); opts->regularize = config->regularize->opts_assign(c_ptr); c_ptr += config->regularize->opts_calculate_size(); // dynamics opts->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr); c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]); } // cost opts->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr); c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]); } // constraints opts->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { opts->constraints[ii] = constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr); c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]); } assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr); return opts; } void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; ocp_nlp_reg_config *regularize 
= config->regularize; int ii; int N = dims->N; // SQP opts opts->max_iter = 20; opts->tol_stat = 1e-8; opts->tol_eq = 1e-8; opts->tol_ineq = 1e-8; opts->tol_comp = 1e-8; opts->reuse_workspace = 1; #if defined(ACADOS_WITH_OPENMP) opts->num_threads = ACADOS_NUM_THREADS; #endif opts->ext_qp_res = 0; opts->qp_warm_start = 0; // submodules opts // qp solver qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts); // overwrite default qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_stat", &opts->tol_stat); qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_eq", &opts->tol_eq); qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq); qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_comp", &opts->tol_comp); // regularization regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize); // dynamics for (ii = 0; ii < N; ii++) { dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } return; } void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int ii; int N = dims->N; qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (ii = 0; ii < N; ii++) { dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { cost[ii]->opts_update(cost[ii], 
dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } return; } void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value) { ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_; ocp_nlp_config *config = config_; int ii; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name char *char_ = strchr(field, '_'); if(char_!=NULL) { module_length = char_-field; for(ii=0; ii<module_length; ii++) module[ii] = field[ii]; module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to QP module if(!strcmp(ptr_module, "qp")) { config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value); if(!strcmp(field, "qp_warm_start")) { int* i_ptr = (int *) value; opts->qp_warm_start = *i_ptr; } } else // nlp opts { if (!strcmp(field, "max_iter")) { int* max_iter = (int *) value; opts->max_iter = *max_iter; } else if (!strcmp(field, "reuse_workspace")) { int* reuse_workspace = (int *) value; opts->reuse_workspace = *reuse_workspace; } else if (!strcmp(field, "num_threads")) { int* num_threads = (int *) value; opts->num_threads = *num_threads; } else if (!strcmp(field, "tol_stat")) // TODO rename !!! { double* tol_stat = (double *) value; opts->tol_stat = *tol_stat; // pass to QP too config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_stat", value); } else if (!strcmp(field, "tol_eq")) // TODO rename !!! { double* tol_eq = (double *) value; opts->tol_eq = *tol_eq; // pass to QP too config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_eq", value); } else if (!strcmp(field, "tol_ineq")) // TODO rename !!! 
{ double* tol_ineq = (double *) value; opts->tol_ineq = *tol_ineq; // pass to QP too config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_ineq", value); } else if (!strcmp(field, "tol_comp")) // TODO rename !!! { double* tol_comp = (double *) value; opts->tol_comp = *tol_comp; // pass to QP too config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_comp", value); } else if (!strcmp(field, "exact_hess")) { int N = config->N; // cost for (ii=0; ii<=N; ii++) config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value); // dynamics for (ii=0; ii<N; ii++) config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value); // constraints TODO disabled for now as prevents convergence !!! // for (ii=0; ii<=N; ii++) // config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value); } else if (!strcmp(field, "ext_qp_res")) { int* ext_qp_res = (int *) value; opts->ext_qp_res = *ext_qp_res; } else { printf("\nerror: ocp_nlp_sqp_opts_set: wrong field: %s\n", field); exit(1); } } return; } void ocp_nlp_sqp_dynamics_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_dynamics_config *dyn_config = config->dynamics[stage]; dyn_config->opts_set(dyn_config, opts->dynamics[stage], field, value); return; } void ocp_nlp_sqp_cost_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_cost_config *cost_config = config->cost[stage]; cost_config->opts_set(cost_config, opts->cost[stage], field, value); return; } void ocp_nlp_sqp_constraints_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_constraints_config *constraints_config = config->constraints[stage]; 
constraints_config->opts_set(constraints_config, opts->constraints[stage], (char *) field, value); return; } /************************************************ * memory ************************************************/ int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // extract dims int N = dims->N; // ocp_nlp_cost_dims **cost_dims = dims->cost; // int ny; int size = 0; size += sizeof(ocp_nlp_sqp_memory); // qp solver size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // dynamics size += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } // nlp res size += ocp_nlp_res_calculate_size(dims); // nlp mem size += ocp_nlp_memory_calculate_size(config, dims); // stat int stat_m = opts->max_iter+1; int stat_n = 6; if(opts->ext_qp_res) stat_n += 4; size += stat_n*stat_m*sizeof(double); size += 8; // initial align // make_int_multiple_of(64, &size); return size; } void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; 
ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; char *c_ptr = (char *) raw_memory; // extract dims int N = dims->N; // ocp_nlp_cost_dims **cost_dims = dims->cost; // int ny; // initial align align_char_to(8, &c_ptr); ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_memory); // QP solver mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr); c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr); c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // nlp res mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr); c_ptr += mem->nlp_res->memsize; // nlp mem mem->nlp_mem = ocp_nlp_memory_assign(config, dims, c_ptr); c_ptr += ocp_nlp_memory_calculate_size(config, dims); // dynamics mem->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr); c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost mem->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr); c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints mem->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { mem->constraints[ii] = constraints[ii]->memory_assign( constraints[ii], 
dims->constraints[ii], opts->constraints[ii], c_ptr); c_ptr += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } // stat mem->stat = (double *) c_ptr; mem->stat_m = opts->max_iter+1; mem->stat_n = 6; if(opts->ext_qp_res) mem->stat_n += 4; c_ptr += mem->stat_m*mem->stat_n*sizeof(double); mem->status = ACADOS_READY; assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr); return mem; } /************************************************ * workspace ************************************************/ int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // loop index int ii; // extract dims int N = dims->N; int size = 0; int size_tmp = 0; int tmp; // sqp size += sizeof(ocp_nlp_sqp_work); // array of pointers // cost size += (N + 1) * sizeof(void *); // dynamics size += N * sizeof(void *); // constraints size += (N + 1) * sizeof(void *); // qp in size += ocp_qp_in_calculate_size(qp_solver, dims->qp_solver); // qp out size += ocp_qp_out_calculate_size(qp_solver, dims->qp_solver); if(opts->ext_qp_res) { // qp res size += ocp_qp_res_calculate_size(dims->qp_solver); // qp res ws size += ocp_qp_res_workspace_calculate_size(dims->qp_solver); } if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (ii = 0; ii < N; ii++) { size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { size += cost[ii]->workspace_calculate_size(cost[ii], 
dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } #else // qp solver tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (ii = 0; ii < N; ii++) { tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (ii = 0; ii <= N; ii++) { tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // constraints for (ii = 0; ii <= N; ii++) { tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } size += size_tmp; #endif } else { // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (ii = 0; ii < N; ii++) { size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } } return size; } // TODO(all): introduce member "memsize" in all structures to make on-line cast cheaper (i.e. 
// (comment fragment continued from the previous chunk of this file)
// avoid // to calculate size on-line)

// Lay out the SQP workspace inside the raw buffer `work` via pointer-bump
// allocation. NOTE(review): the order and sizes of the slices below must match
// ocp_nlp_sqp_workspace_calculate_size exactly (checked by the final assert).
static void ocp_nlp_sqp_cast_workspace(void *config_, ocp_nlp_dims *dims, ocp_nlp_sqp_work *work,
    ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_opts *opts)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // extract dims
    int N = dims->N;

    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_work);

    // array of pointers
    //
    work->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    //
    work->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    //
    work->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // qp in
    work->qp_in = ocp_qp_in_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(qp_solver, dims->qp_solver);

    // qp out
    work->qp_out = ocp_qp_out_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(qp_solver, dims->qp_solver);

    // extra QP residual storage is only laid out when requested in the opts
    if(opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver);
    }

    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // with OpenMP each module workspace gets its own slice
        // (c_ptr is advanced after every assignment)
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // serial build: all module workspaces alias the same address
        // (c_ptr is NOT advanced) — presumably safe because only one module
        // workspace is live at a time without OpenMP; TODO(review) confirm
        // qp solver
        work->qp_work = (void *) c_ptr;

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
        }
#endif
    }
    else
    {
        // no workspace reuse: every module always gets a private slice
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
    }

    // assert & return
    assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}



/************************************************
 * functions
 ************************************************/

// Call each submodule's initialize (cost, dynamics, constraints) on all stages
// 0..N; dynamics only exists on stages 0..N-1.
static void initialize_qp(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
    ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
    ocp_nlp_sqp_work *work)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // loop index
    int ii;

    // extract dims
    int N = dims->N;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        // cost
        config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], nlp_in->cost[ii],
            opts->cost[ii], mem->cost[ii], work->cost[ii]);

        // dynamics
        if (ii < N)
            config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii],
                nlp_in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii],
                work->dynamics[ii]);

        // constraints
        config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii],
            nlp_in->constraints[ii], opts->constraints[ii], mem->constraints[ii],
            work->constraints[ii]);
    }

    return;
}



// Evaluate the stage-wise multiple-shooting linearization (Hessians into
// qp_in->RSQrq) and then collect the per-stage gradients, residuals and
// adjoints into the NLP memory.
static void linearize_update_qp_matrices(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
    ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
    ocp_nlp_sqp_work *work)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // loop index
    int i;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    /* stage-wise multiple shooting lagrangian evaluation */

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // init Hessian to 0 (modules below accumulate into it)
        blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, work->qp_in->RSQrq+i, 0, 0);

        // dynamics
        if (i < N)
            config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
                nlp_in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);

        // cost
        config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], nlp_in->cost[i],
            opts->cost[i], mem->cost[i], work->cost[i]);

        // constraints
        config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
            nlp_in->constraints[i], opts->constraints[i], mem->constraints[i],
            work->constraints[i]);
    }

    /* collect stage-wise evaluations */

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i <= N; i++)
    {
        // nlp mem: cost_grad
        struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
        blasfeo_dveccp(nv[i], cost_grad, 0, nlp_mem->cost_grad + i, 0);

        // nlp mem: dyn_fun
        if (i < N)
        {
            struct blasfeo_dvec *dyn_fun = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nx[i + 1], dyn_fun, 0, nlp_mem->dyn_fun + i, 0);
        }

        // nlp mem: dyn_adj
        if (i < N)
        {
            struct blasfeo_dvec *dyn_adj = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, nlp_mem->dyn_adj + i, 0);
        }
        else
        {
            // terminal stage has no dynamics adjoint of its own
            blasfeo_dvecse(nu[N] + nx[N], 0.0, nlp_mem->dyn_adj + N, 0);
        }
        // accumulate the previous stage's adjoint tail into the state part
        // (offset nu[i]) of this stage's dyn_adj
        if (i > 0)
        {
            struct blasfeo_dvec *dyn_adj = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
            blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], nlp_mem->dyn_adj+i, nu[i],
                nlp_mem->dyn_adj+i, nu[i]);
        }

        // nlp mem: ineq_fun
        struct blasfeo_dvec *ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        blasfeo_dveccp(2 * ni[i], ineq_fun, 0, nlp_mem->ineq_fun + i, 0);

        // nlp mem: ineq_adj
        struct blasfeo_dvec *ineq_adj = config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
        blasfeo_dveccp(nv[i], ineq_adj, 0, nlp_mem->ineq_adj + i, 0);
    }

    // TODO(all): still to clean !!!!!!!!!!!!!
    for (i = 0; i <= N; i++)
    {
        // TODO(rien) where should the update happen??? move to qp update ???
        // TODO(all): fix and move where appropriate
        //        if(i<N)
        //        {
        //            ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //            sim_opts *opts = dynamics_opts->sim_solver;
        //            if (opts->scheme != NULL && opts->scheme->type != exact)
        //            {
        //                for (int_t j = 0; j < nx; j++)
        //                    BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
        //                for (int_t j = 0; j < nu; j++)
        //                    BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
        //            }
        //        }
    }

    return;
}



// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
static void sqp_update_qp_vectors(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in, ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work) { // loop index int i; // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; ocp_nlp_memory *nlp_mem = mem->nlp_mem; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i = 0; i <= N; i++) { // g blasfeo_dveccp(nv[i], nlp_mem->cost_grad + i, 0, work->qp_in->rqz + i, 0); // b if (i < N) blasfeo_dveccp(nx[i + 1], nlp_mem->dyn_fun + i, 0, work->qp_in->b + i, 0); // d blasfeo_dveccp(2 * ni[i], nlp_mem->ineq_fun + i, 0, work->qp_in->d + i, 0); } return; } static void sqp_update_variables(void *config_, ocp_nlp_dims *dims, ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work) { // loop index int i; // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; // ocp_nlp_config *config = (ocp_nlp_config *) config_; // TODO(all): fix and move where appropriate // for (i = 0; i < N; i++) // { // nx1 = dims->constraints[i+1]->nx; // for (j = 0; j < nx1; j++) // { // work->sim_in[i]->S_adj[j] = -BLASFEO_DVECEL(&work->qp_out->pi[i], j); // } // } #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i = 0; i <= N; i++) { // (full) step in primal variables blasfeo_daxpy(nv[i], 1.0, work->qp_out->ux + i, 0, nlp_out->ux + i, 0, nlp_out->ux + i, 0); // absolute in dual variables if (i < N) blasfeo_dveccp(nx[i + 1], work->qp_out->pi + i, 0, nlp_out->pi + i, 0); blasfeo_dveccp(2 * ni[i], work->qp_out->lam + i, 0, nlp_out->lam + i, 0); blasfeo_dveccp(2 * ni[i], work->qp_out->t + i, 0, nlp_out->t + i, 0); } return; } // Simple fixed-step Gauss-Newton based SQP routine int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { // acados timer acados_timer timer0, 
timer1; // start timer acados_tic(&timer0); ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_work *work = work_; ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts); // zero timers double total_time = 0.0; mem->time_qp_sol = 0.0; mem->time_lin = 0.0; mem->time_reg = 0.0; mem->time_tot = 0.0; // extract dims int N = dims->N; int ii; int qp_iter = 0; int qp_status = 0; #if defined(ACADOS_WITH_OPENMP) // backup number of threads int num_threads_bkp = omp_get_num_threads(); // set number of threads omp_set_num_threads(opts->num_threads); #pragma omp parallel { // beginning of parallel region #endif // alias to dynamics_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for #endif for (ii = 0; ii < N; ii++) { config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->dynamics[ii]); config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, mem->dynamics[ii]); config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, mem->dynamics[ii]); config->dynamics[ii]->memory_set_BAbt_ptr(work->qp_in->BAbt+ii, mem->dynamics[ii]); config->dynamics[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq+ii, mem->dynamics[ii]); config->dynamics[ii]->memory_set_z_alg_ptr(nlp_out->z+ii, mem->dynamics[ii]); } // alias to cost_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for #endif for (ii = 0; ii <= N; ii++) { config->cost[ii]->memory_set_ux_ptr(nlp_out->ux + ii, mem->cost[ii]); if (dims->nz[ii] > 0) { config->cost[ii]->memory_set_z_alg_ptr( &(((ocp_nlp_dynamics_cont_memory *) mem->dynamics[ii])->z_out), mem->cost[ii]); config->cost[ii]->memory_set_dzdux_tran_ptr( &(((ocp_nlp_dynamics_cont_memory *) mem->dynamics[ii])->dzdux_tran), mem->cost[ii]); } config->cost[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq + ii, mem->cost[ii]); config->cost[ii]->memory_set_Z_ptr(work->qp_in->Z + 
ii, mem->cost[ii]); } // alias to constraints_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for #endif for (ii = 0; ii <= N; ii++) { config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->constraints[ii]); config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, mem->constraints[ii]); config->constraints[ii]->memory_set_DCt_ptr(work->qp_in->DCt+ii, mem->constraints[ii]); config->constraints[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq+ii, mem->constraints[ii]); config->constraints[ii]->memory_set_idxb_ptr(work->qp_in->idxb[ii], mem->constraints[ii]); config->constraints[ii]->memory_set_idxs_ptr(work->qp_in->idxs[ii], mem->constraints[ii]); } // alias to regularize memory config->regularize->memory_set_RSQrq_ptr(dims->regularize, work->qp_in->RSQrq, mem->regularize_mem); config->regularize->memory_set_rq_ptr(dims->regularize, work->qp_in->rqz, mem->regularize_mem); config->regularize->memory_set_BAbt_ptr(dims->regularize, work->qp_in->BAbt, mem->regularize_mem); config->regularize->memory_set_b_ptr(dims->regularize, work->qp_in->b, mem->regularize_mem); config->regularize->memory_set_idxb_ptr(dims->regularize, work->qp_in->idxb, mem->regularize_mem); config->regularize->memory_set_DCt_ptr(dims->regularize, work->qp_in->DCt, mem->regularize_mem); config->regularize->memory_set_ux_ptr(dims->regularize, work->qp_out->ux, mem->regularize_mem); config->regularize->memory_set_pi_ptr(dims->regularize, work->qp_out->pi, mem->regularize_mem); config->regularize->memory_set_lam_ptr(dims->regularize, work->qp_out->lam, mem->regularize_mem); // copy sampling times into dynamics model #if defined(ACADOS_WITH_OPENMP) #pragma omp for #endif for (ii = 0; ii < N; ii++) { config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); } #if defined(ACADOS_WITH_OPENMP) } // end of parallel region #endif // initialize QP initialize_qp(config, dims, nlp_in, nlp_out, opts, mem, work); // main sqp loop int sqp_iter = 
0; for (; sqp_iter < opts->max_iter; sqp_iter++) { // printf("\n------- sqp iter %d (max_iter %d) --------\n", sqp_iter, opts->max_iter); // if(sqp_iter==2) // exit(1); // start timer acados_tic(&timer1); // linearizate NLP and update QP matrices linearize_update_qp_matrices(config, dims, nlp_in, nlp_out, opts, mem, work); // stop timer mem->time_lin += acados_toc(&timer1); // update QP rhs for SQP (step prim var, abs dual var) sqp_update_qp_vectors(config, dims, nlp_in, nlp_out, opts, mem, work); // compute nlp residuals ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, mem->nlp_mem); nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g; nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ? mem->nlp_res->inf_norm_res_b : nlp_out->inf_norm_res; nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ? mem->nlp_res->inf_norm_res_d : nlp_out->inf_norm_res; nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ? 
mem->nlp_res->inf_norm_res_m : nlp_out->inf_norm_res; // save statistics if (sqp_iter < mem->stat_m) { mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g; mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b; mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d; mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m; mem->stat[mem->stat_n*sqp_iter+4] = qp_status; mem->stat[mem->stat_n*sqp_iter+5] = qp_iter; } // exit conditions on residuals if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) & (mem->nlp_res->inf_norm_res_b < opts->tol_eq) & (mem->nlp_res->inf_norm_res_d < opts->tol_ineq) & (mem->nlp_res->inf_norm_res_m < opts->tol_comp)) { // printf("%d sqp iterations\n", sqp_iter); // print_ocp_qp_in(work->qp_in); // save sqp iterations number mem->sqp_iter = sqp_iter; nlp_out->sqp_iter = sqp_iter; // stop timer total_time += acados_toc(&timer0); // save time nlp_out->total_time = total_time; mem->time_tot = total_time; #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_SUCCESS; return mem->status; } // start timer acados_tic(&timer1); // regularize Hessian config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->regularize, mem->regularize_mem); // stop timer mem->time_reg += acados_toc(&timer1); // printf("\n------- qp_in (sqp iter %d) --------\n", sqp_iter); // print_ocp_qp_in(work->qp_in); // if(sqp_iter==1) // exit(1); // no warm start at first iteration if(sqp_iter==0) { int tmp_int = 0; config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &tmp_int); } // start timer acados_tic(&timer1); // TODO move qp_out in memory !!!!! 
(it has to be preserved to do warm start) qp_status = qp_solver->evaluate(qp_solver, work->qp_in, work->qp_out, opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work); // stop timer mem->time_qp_sol += acados_toc(&timer1); // start timer acados_tic(&timer1); // compute correct dual solution in case of Hessian regularization config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->regularize, mem->regularize_mem); // stop timer mem->time_reg += acados_toc(&timer1); // restore default warm start if(sqp_iter==0) { config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &opts->qp_warm_start); } // TODO move into QP solver memory ??? nlp_out->qp_iter = ((ocp_qp_info *) work->qp_out->misc)->num_iter; qp_iter = ((ocp_qp_info *) work->qp_out->misc)->num_iter; // compute external QP residuals (for debugging) if(opts->ext_qp_res) { ocp_qp_res_compute(work->qp_in, work->qp_out, work->qp_res, work->qp_res_ws); ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6)); // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, inf_norm_qp_res[0], inf_norm_qp_res[1], inf_norm_qp_res[2], inf_norm_qp_res[3]); } // printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter); // print_ocp_qp_out(work->qp_out); // if(sqp_iter==1) // exit(1); if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER)) { // print_ocp_qp_in(work->qp_in); // save sqp iterations number mem->sqp_iter = sqp_iter; nlp_out->sqp_iter = sqp_iter; // stop timer total_time += acados_toc(&timer0); // save time mem->time_tot = total_time; nlp_out->total_time = total_time; printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter); #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_QP_FAILURE; return mem->status; } sqp_update_variables(config, dims, nlp_out, opts, mem, work); // ocp_nlp_dims_print(nlp_out->dims); // 
ocp_nlp_out_print(nlp_out); // exit(1); // ??? @rien // for (int_t i = 0; i < N; i++) // { // ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i]; // sim_opts *opts = dynamics_opts->sim_solver; // if (opts->scheme == NULL) // continue; // opts->sens_adj = (opts->scheme->type != exact); // if (nlp_in->freezeSens) { // // freeze inexact sensitivities after first SQP iteration !! // opts->scheme->freeze = true; // } // } } // stop timer total_time += acados_toc(&timer0); // ocp_nlp_out_print(nlp_out); // save sqp iterations number mem->sqp_iter = sqp_iter; nlp_out->sqp_iter = sqp_iter; // save time mem->time_tot = total_time; nlp_out->total_time = total_time; // printf("%d sqp iterations\n", sqp_iter); // print_ocp_qp_in(work->qp_in); // maximum number of iterations reached #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_MAXITER; return mem->status; } int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; // ocp_nlp_out *nlp_out = nlp_out_; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_work *work = work_; ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts); // extract dims int N = dims->N; int status = ACADOS_SUCCESS; int ii; // TODO(fuck_lint) checks // TODO(fuck_lint) flag to enable/disable checks for (ii = 0; ii <= N; ii++) { // TODO(fuck_lint) check that ns in opt_var == ns in constraints } // precompute for (ii = 0; ii < N; ii++) { // set T config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); // dynamics precompute status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], 
work->dynamics[ii]); if (status != ACADOS_SUCCESS) return status; } return status; } void ocp_nlp_sqp_get(void *config_, void *mem_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = mem->sqp_iter; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("nlp_res", field)) { ocp_nlp_res **value = return_value_; *value = mem->nlp_res; } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("stat_m", field)) { int *value = return_value_; *value = mem->stat_m; } else if (!strcmp("stat_n", field)) { int *value = return_value_; *value = mem->stat_n; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field); exit(1); } } void ocp_nlp_sqp_config_initialize_default(void *config_) { ocp_nlp_config *config = (ocp_nlp_config *) config_; config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size; config->opts_assign = &ocp_nlp_sqp_opts_assign; config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default; config->opts_update = &ocp_nlp_sqp_opts_update; config->opts_set = &ocp_nlp_sqp_opts_set; config->dynamics_opts_set = &ocp_nlp_sqp_dynamics_opts_set; config->cost_opts_set = &ocp_nlp_sqp_cost_opts_set; config->constraints_opts_set = &ocp_nlp_sqp_constraints_opts_set; config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size; config->memory_assign = 
&ocp_nlp_sqp_memory_assign; config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size; config->evaluate = &ocp_nlp_sqp; config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default; config->precompute = &ocp_nlp_sqp_precompute; config->get = &ocp_nlp_sqp_get; return; }
Example_target.3.c
/*
 * @@name: target.3c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 * @@version: omp_4.0
 */
extern void init(float*, float*, int);
extern void output(float*, int);

/* Element-wise product p[i] = v1[i] * v2[i], computed in a target region. */
void vec_mult(int N)
{
   float p[N], v1[N], v2[N];

   init(v1, v2, N);

   /* v1/v2 are mapped to the device; p is mapped back when the region ends. */
   #pragma omp target map(to: v1, v2) map(from: p)
   #pragma omp parallel for
   for (int i = 0; i < N; i++)
      p[i] = v1[i] * v2[i];

   output(p, N);
}
ompnumthread.c
/*
 * $PIP_license: <Simplified BSD License>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 * $
 * $RIKEN_copyright: Riken Center for Computational Sceience (R-CCS),
 * System Software Development Team, 2016-2021
 * $
 * $PIP_TESTSUITE: Version 1.1.0$
 *
 * $Author: Atsushi Hori (R-CCS) mailto: ahori@riken.jp or ahori@me.com
 * $
 */

#include <omp.h>
#include <stdio.h>

/* Written inside the parallel region; every thread stores the same value.
 * NOTE(review): technically a concurrent write from all threads — benign in
 * practice for this smoke test, but worth confirming against the harness. */
int nth;

int main( void )
{
#pragma omp parallel
  nth = omp_get_num_threads();

  printf( "%d\n", nth );

  /* Fail (exit code 1) only if no thread count was observed at all. */
  return ( nth == 0 ) ? 1 : 0;
}
utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file utils.h
 * \brief Basic utility functions (sparse-format validation, parallel helpers).
 */
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_

#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>

#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>

#include "../operator/mxnet_op.h"

namespace mxnet {
namespace common {

/*!
 * \brief IndPtr should be non-negative, in non-decreasing order, start with 0
 * and end with value equal with size of indices.
 * Writes kCSRIndPtrErr to *out when element i violates any of these rules.
 */
struct csr_indptr_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
        (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};

/*!
 * \brief Indices should be non-negative, less than the number of columns
 * and in ascending order per row.
 * Writes kCSRIdxErr to *out when row i violates any of these rules.
 */
struct csr_idx_check {
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};

/*!
 * \brief Indices of RSPNDArray should be non-negative,
 * less than the size of first dimension and in ascending order.
 * Writes kRSPIdxErr to *out when element i violates any of these rules.
 */
struct rsp_idx_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};

// Dispatches to the device-specific format check (defined per backend).
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check);

/*!
 * \brief Check the validity of CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const TShape shape = input.shape();
  const TShape idx_shape = input.aux_shape(csr::kIdx);
  const TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const TShape storage_shape = input.storage_shape();
  // cheap O(1) structural check: ranks and shape consistency
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    // O(N) content check: run the element-wise kernels on the device and
    // copy the resulting error flag back to the CPU blob
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}

/*!
 * \brief Check the validity of RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of RowSparseStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) check: index count must match the number of stored rows
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());

        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}

// Dispatch format validation by storage type; default storage needs no check.
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  int stype = input.storage_type();
  if (stype == kCSRStorage) {
    CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kRowSparseStorage) {
    CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kDefaultStorage) {
    // no-op for default storage
  } else {
    LOG(FATAL) << "Unknown storage type " << stype;
  }
}

// Casts `input` into `output`'s storage format (defined per backend).
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);

/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) { if (!vstorage.empty()) { for (const auto& i : vstorage) { if (i != stype) return false; } return true; } return false; } /*! \brief returns true if all storage types in `vstorage` are the same as target `stype1` * or `stype2'. Sets boolean if both found. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!vstorage.empty()) { uint8_t has = 0; for (const auto i : vstorage) { if (i == stype1) { has |= 1; } else if (i == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as target `stype`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as targets `stype1` or `stype2`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!ndarrays.empty()) { uint8_t has = 0; for (const auto& nd : ndarrays) { const NDArrayStorageType stype = nd.storage_type(); if (stype == stype1) { has |= 1; } else if (stype == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! 
\brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  switch (x) {
    case DispatchMode::kFCompute:
      return "fcompute";
    case DispatchMode::kFComputeEx:
      return "fcompute_ex";
    case DispatchMode::kFComputeFallback:
      return "fcompute_fallback";
    case DispatchMode::kVariable:
      return "variable";
    case DispatchMode::kUndefined:
      return "undefined";
  }
  // Unreachable for the enum values handled above; kept so the function
  // always returns without needing a default case (which would silence
  // -Wswitch warnings when new modes are added).
  return "unknown";
}

/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:
      return "default";
    case kCSRStorage:
      return "csr";
    case kRowSparseStorage:
      return "row_sparse";
  }
  return "unknown";
}

// heuristic to determine number of threads per GPU
inline int GetNumThreadPerGPU() {
  // This is resource efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}

// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is resource efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  // Never exceed the per-GPU worker thread count configured above.
  return std::min(num_match_color, GetNumThreadPerGPU());
}

/*!
 * \brief Sum the first \a n elements of \a a onto the seed value \a start,
 *        using an OpenMP parallel reduction.  Result type is the seed's type.
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V sum = start;
  #pragma omp parallel for reduction(+:sum)
  for (int i = 0; i < n; ++i) {
    sum += a[i];
  }
  return sum;
}

/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    // Small enough: sort sequentially on the current thread.
    std::sort(first, first+len, comp);
  } else {
    // Sort the left half on a freshly spawned thread, the right half here,
    // then merge the two sorted halves in place.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // Grain size is at least 16K elements so tiny ranges never spawn threads.
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}

/*!
 * \brief Random Engine
 */
typedef std::mt19937 RANDOM_ENGINE;

/*!
 * \brief Helper functions.
 */
namespace helper {

/*!
 * \brief Helper for non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};

/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};

}  // namespace helper

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 * Note: `new U[n]{}` value-initializes the elements (zero for scalars).
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructs an arrays of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;

/*!
 * \brief Look up the registered FCompute-style attribute \a name for \a op on
 *        the device of \a ctx.  Returns nullptr (via get's default) when the
 *        op has no such attribute registered; LOG(FATAL) aborts on devices
 *        other than CPU/GPU.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  // static: the attribute maps are resolved once per template instantiation.
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask";
    return nullptr;
  }
}

}  // namespace common
}  // namespace mxnet
#endif  // MXNET_COMMON_UTILS_H_
2mm.c
/**
 * 2mm.c: This file is part of the PolyBench/C 3.2 test suite.
 *
 *
 * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://polybench.sourceforge.net
 */
#include "util.h"
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "2mm.h"

/* Global arguments */
int ni, nj, nk, nl, T = 1;
double g_alpha, g_beta;
/* Row-pointer views of the PolyBench arrays; rows ALIAS the arrays declared
 * in main (no data is copied), so the kernels can use double** indexing. */
double **g_tmp;
double **g_A;
double **g_B;
double **g_C;
double **g_D;

/* Create barrier */
pthread_barrier_t barrier;
sem_t mutex;

/* Array initialization.
 * Fills A, B, C, D with the PolyBench reference values, leaves tmp
 * uninitialized (every kernel writes it before reading), and publishes
 * row-pointer views through the g_* globals. */
static void init_array(int ni, int nj, int nk, int nl,
                       double *alpha,
                       double *beta,
                       double POLYBENCH_2D(A,NI,NK,ni,nk),
                       double POLYBENCH_2D(B,NK,NJ,nk,nj),
                       double POLYBENCH_2D(C,NL,NJ,nl,nj),
                       double POLYBENCH_2D(D,NI,NL,ni,nl),
                       double POLYBENCH_2D(tmp,NI,NJ,ni,nj))
{
  *alpha = 32412;
  *beta = 2123;
  /* BUG FIX: these tables hold double* elements, so they must be sized with
   * sizeof(double *) (sizeof(double) only worked by accident on LP64), and
   * each table needs one slot per ROW of the matrix it mirrors: B has NK
   * rows and C has NL rows, not NI. */
  g_A = (double **) malloc(sizeof(double *) * NI);
  g_B = (double **) malloc(sizeof(double *) * NK);
  g_C = (double **) malloc(sizeof(double *) * NL);
  g_D = (double **) malloc(sizeof(double *) * NI);
  g_tmp = (double **) malloc(sizeof(double *) * NI);
  /* Because g_X[i] aliases X[i], writing X[i][j] is enough; the former
   * g_X[i][j] = X[i][j] self-assignments were no-ops (and for tmp they read
   * uninitialized memory) and have been dropped. */
  for (int i = 0; i < ni; i++) {
    g_A[i] = A[i];
    for (int j = 0; j < nk; j++) {
      A[i][j] = ((double) i*j) / ni;
    }
  }
  for (int i = 0; i < nk; i++) {
    g_B[i] = B[i];
    for (int j = 0; j < nj; j++) {
      B[i][j] = ((double) i*(j+1)) / nj;
    }
  }
  for (int i = 0; i < nl; i++) {
    g_C[i] = C[i];
    for (int j = 0; j < nj; j++) {
      C[i][j] = ((double) i*(j+3)) / nl;
    }
  }
  for (int i = 0; i < ni; i++) {
    g_D[i] = D[i];
    for (int j = 0; j < nl; j++) {
      D[i][j] = ((double) i*(j+2)) / nk;
    }
  }
  for (int i = 0; i < ni; i++) {
    g_tmp[i] = tmp[i];
  }
}

/* Pthreads worker: each thread owns a horizontal stripe [init, end) of rows.
 * Phase 1 computes tmp = alpha*A*B for its rows; the barrier separates the
 * phases because phase 2 reads whole columns of tmp written by OTHER threads. */
static void *kernel_2mm_pthreads(void *arg)
{
#ifdef PAPI
  PAPI_THREAD_INIT()
#endif
  int *id = (int *) arg;
  int stripe = get_stripe(T);
  int init = get_init(*id, stripe);
  int end = get_end(init, stripe);
  /* Phase 1: tmp := alpha * A * B (rows [init, end)) */
  for (int i = init; i < end; i++) {
    for (int j = 0; j < NJ; j++) {
      g_tmp[i][j] = 0;
      for (int k = 0; k < NK; ++k) {
        g_tmp[i][j] += g_alpha * g_A[i][k] * g_B[k][j];
      }
    }
  }
  pthread_barrier_wait(&barrier);
  /* Phase 2: D := tmp * C + beta * D (rows [init, end)) */
  for (int i = init; i < end; i++) {
    /* BUG FIX: D has NL columns (the sequential kernel iterates j < nl);
     * the bound here was NJ, which is wrong whenever NJ != NL. */
    for (int j = 0; j < NL; j++) {
      g_D[i][j] *= g_beta;
      for (int k = 0; k < NJ; ++k) {
        g_D[i][j] += g_tmp[i][k] * g_C[k][j];
      }
    }
  }
#ifdef PAPI
  /* Serialize PAPI counter accumulation across threads. */
  sem_wait(&mutex);
  PAPI_UPDATE()
  sem_post(&mutex);
#endif
  return NULL;
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
/* Parallelized using OpenMP */
static void kernel_2mm_openmp()
{
  /* D := alpha*A*B*C + beta*D */
  /* A[i][j] */
  /* i -> line */
  /* j -> column */
  #pragma omp parallel num_threads(T)
  {
    /* The implicit barrier after this worksharing loop orders the two
     * phases (phase 2 reads all of tmp). */
    #pragma omp for simd
    for (int i = 0; i < ni; i++) {
      for (int j = 0; j < nj; j++) {
        g_tmp[i][j] = 0;
        for (int k = 0; k < nk; ++k) {
          g_tmp[i][j] += g_alpha * g_A[i][k] * g_B[k][j];
        }
      }
    }
    #pragma omp for simd
    for (int i = 0; i < ni; i++) {
      for (int j = 0; j < nl; j++) {
        g_D[i][j] *= g_beta;
        for (int k = 0; k < nj; ++k) {
          g_D[i][j] += g_tmp[i][k] * g_C[k][j];
        }
      }
    }
  }
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
/* Original sequential code */
static void kernel_2mm()
{
  /* D := alpha*A*B*C + beta*D */
  /* A[i][j] */
  /* i -> line */
  /* j -> column */
  for (int i = 0; i < ni; i++) {
    for (int j = 0; j < nj; j++) {
      g_tmp[i][j] = 0;
      for (int k = 0; k < nk; ++k) {
        g_tmp[i][j] += g_alpha * g_A[i][k] * g_B[k][j];
      }
    }
  }
  for (int i = 0; i < ni; i++) {
    for (int j = 0; j < nl; j++) {
      g_D[i][j] *= g_beta;
      for (int k = 0; k < nj; ++k) {
        g_D[i][j] += g_tmp[i][k] * g_C[k][j];
      }
    }
  }
}

int main(int argc, char** argv)
{
  /* argv[1] = number of threads, argv[2] = which kernel to run. */
  if (argc < 3) {
    fprintf(stderr, "usage: %s <num_threads> <prog: 0=seq 1=pthreads 2=openmp>\n",
            argv[0]);
    return 1;
  }
  /* Retrieve which code to run */
  int prog = atoi(argv[2]);
  /* Retrieve problem size. */
  ni = NI;
  nj = NJ;
  nk = NK;
  nl = NL;
  /* Number of threads */
  T = atoi(argv[1]);
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(tmp,double,NI,NJ,ni,nj);
  POLYBENCH_2D_ARRAY_DECL(A,double,NI,NK,ni,nk);
  POLYBENCH_2D_ARRAY_DECL(B,double,NK,NJ,nk,nj);
  POLYBENCH_2D_ARRAY_DECL(C,double,NL,NJ,nl,nj);
  POLYBENCH_2D_ARRAY_DECL(D,double,NI,NL,ni,nl);
  // Initialize array(s).
  init_array(ni, nj, nk, nl, &g_alpha, &g_beta,
             POLYBENCH_ARRAY(A),
             POLYBENCH_ARRAY(B),
             POLYBENCH_ARRAY(C),
             POLYBENCH_ARRAY(D),
             POLYBENCH_ARRAY(tmp));
  TIME()
  switch(prog) {
    case 0: {
#ifdef PAPI
      START_PAPI()
#endif
      // Run sequential kernel
      kernel_2mm();
      break;
    }
    case 1: {
      // Threads declaration
      pthread_t threads[T];
#ifdef PAPI
      PAPI_thread_init(pthread_self);
      START_PAPI()
#endif
      // Initialize sync tools
      pthread_barrier_init(&barrier, NULL, T);
      sem_init(&mutex, 0, 1);
      // Start threads (start_pthread from util.h launches the workers;
      // NOTE(review): it is assumed to also join them -- confirm, otherwise
      // ENDTIME() below races with the workers).
      start_pthread(threads, T, kernel_2mm_pthreads);
      break;
    }
    case 2: {
#ifdef PAPI
      START_PAPI()
#endif
      // Run OpenMP kernel.
      kernel_2mm_openmp();
      break;
    }
    default:
      break;
  }
#ifdef PAPI
  ENDPAPI()
#endif
  ENDTIME()
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(tmp);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  return 0;
}
depobj.c
// Example for the depobj construct // From: https://www.openmp.org/wp-content/uploads/openmp-examples-5.0.0.pdf #include <stdio.h> #include <omp.h> #define N 100 #define TRUE 1 #define FALSE 0 void driver(int update, float a[], float b[], int n, omp_depend_t *obj); void update_copy(int update, float a[], float b[], int n); void checkpoint(float a[], int n); void init(float a[], int n); int main() { float a[N], b[N]; omp_depend_t obj; init(a, N); #pragma omp depobj(obj) depend(inout: a) driver(TRUE, a, b, N, &obj); // updating a occurs #pragma omp depobj(obj) update(in) driver(FALSE, a, b, N, &obj); // no updating of a // obj is set to uninitilized state, sources are freed #pragma omp depobj(obj) destroy return 0; } void driver(int update, float a[], float b[], int n, omp_depend_t *obj) { #pragma omp parallel num_threads(2) #pragma omp single { #pragma omp task depend(depobj: obj) // Task 1, uses depend object update_copy(update, a, b, n); #pragma omp task depend(in: a[0:n]) // Task 2, only read a checkpoint(a, n); } } void update_copy(int update, float a[], float b[], int n) { if (update) for (int i = 0; i<n; i++) a[i] += 1.0f; for (int i = 0; i<n; i++) b[i] = a[i]; } void checkpoint(float a[], int n) { for (int i = 0; i<n; i++) printf(" %f ", a[i]); printf("\n"); } void init(float a[], int n) { for (int i = 0; i<n; i++) a[i] = i; }
GB_unop__signum_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__signum_fp32_fp32)
// op(A') function: GB (_unop_tran__signum_fp32_fp32)

// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = GB_signumf (aij)

#define GB_ATYPE \
 float

#define GB_CTYPE \
 float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
 float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
 z = GB_signumf (x) ;

// casting
#define GB_CAST(z, aij) \
 float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
 /* aij = Ax [pA] */ \
 float aij = Ax [pA] ; \
 /* Cx [pC] = op (cast (aij)) */ \
 float z = aij ; \
 Cx [pC] = GB_signumf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
 (GxB_NO_SIGNUM || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = signum (Ax [p]) for all p; aliasing Cx == Ax is safe because each
// entry p is read once and then written once.
GrB_Info GB (_unop_apply__signum_fp32_fp32)
(
 float *Cx, // Cx and Ax may be aliased
 const float *Ax,
 const int8_t *restrict Ab, // A->b if A is bitmap
 int64_t anz,
 int nthreads
)
{
 #if GB_DISABLE
 return (GrB_NO_VALUE) ;
 #else
 // p is declared outside the loops so the same index variable can be used
 // by both OpenMP worksharing loops below.
 int64_t p ;
 if (Ab == NULL)
 {
 // dense/sparse case: all anz entries are present
 #pragma omp parallel for num_threads(nthreads) schedule(static)
 for (p = 0 ; p < anz ; p++)
 {
 float aij = Ax [p] ;
 float z = aij ;
 Cx [p] = GB_signumf (z) ;
 }
 }
 else
 {
 // bitmap case, no transpose; A->b already memcpy'd into C->b
 #pragma omp parallel for num_threads(nthreads) schedule(static)
 for (p = 0 ; p < anz ; p++)
 {
 // skip entries not present in the bitmap
 if (!Ab [p]) continue ;
 float aij = Ax [p] ;
 float z = aij ;
 Cx [p] = GB_signumf (z) ;
 }
 }
 return (GrB_SUCCESS) ;
 #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all generated operators via
// GB_unop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__signum_fp32_fp32)
(
 GrB_Matrix C,
 const GrB_Matrix A,
 int64_t *restrict *Workspaces,
 const int64_t *restrict A_slice,
 int nworkspaces,
 int nthreads
)
{
 #if GB_DISABLE
 return (GrB_NO_VALUE) ;
 #else
 #include "GB_unop_transpose.c"
 return (GrB_SUCCESS) ;
 #endif
}

#endif
crop.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_CROP_H_ #define MACE_KERNELS_CROP_H_ #include <memory> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/core/types.h" #include "mace/kernels/kernel.h" #include "mace/public/mace.h" namespace mace { namespace kernels { template <DeviceType D, typename T> struct CropFunctor : OpKernel { CropFunctor(OpKernelContext *context, const int axis, const std::vector<int> &offset) : OpKernel(context), axis_(axis), offset_(offset) {} void crop_copy(const T* input_data, T* output_data, const std::vector<index_t> &input_shape, const std::vector<index_t> &output_shape, const int32_t* offsets) { const index_t out_img_size = output_shape[1] * output_shape[2] * output_shape[3]; const index_t out_hw = output_shape[2] * output_shape[3]; const index_t in_img_size = input_shape[1] * input_shape[2] * input_shape[3]; const index_t in_hw = input_shape[2] * input_shape[3]; #pragma omp parallel for collapse(3) for (int b = 0; b < output_shape[0]; ++b) { for (int c = 0; c < output_shape[1]; ++c) { for (int h = 0; h < output_shape[2]; ++h) { T* out_ptr = output_data + b * out_img_size + c * out_hw + h * output_shape[3]; const T* in_ptr_bch = input_data + (b + offsets[0]) * in_img_size + (c + offsets[1]) * in_hw + (h + offsets[2]) * input_shape[3] + offsets[3]; memcpy(out_ptr, in_ptr_bch, output_shape[3] * sizeof(T)); } } } } MaceStatus 
operator()(const std::vector<const Tensor *> &input_list, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); MACE_CHECK(input_list.size() == 2, "Crop op needs two inputs."); const Tensor *input0 = input_list[0]; const Tensor *input1 = input_list[1]; const uint32_t in0_dims = static_cast<uint32_t >(input0->dim_size()); const uint32_t in1_dims = static_cast<uint32_t >(input0->dim_size()); MACE_CHECK(in0_dims == 4 && in1_dims == 4, "crop op only supports 4-dims inputs now."); std::vector<int32_t> offsets(in0_dims, 0); std::vector<index_t> output_shape(input0->shape()); for (index_t i = 0; i < in0_dims; ++i) { int32_t crop_offset = 0; index_t new_size = input0->dim(i); if (i >= axis_) { new_size = input1->dim(i); if (offset_.size() == 1) { crop_offset = offset_[0]; } else if (offset_.size() > 1) { crop_offset = offset_[i - axis_]; } MACE_CHECK(input0->dim(i) - crop_offset >= input1->dim(i)) << "the crop for dimension" << i << "is out of bound with size" << input1->dim(i) << "and offset" << crop_offset; } output_shape[i] = new_size; offsets[i] = crop_offset; } MACE_RETURN_IF_ERROR(output->Resize(output_shape)); T *output_data = output->mutable_data<T>(); const T * input_data = input0->data<T>(); crop_copy(input_data, output_data, input0->shape(), output_shape, offsets.data()); return MACE_SUCCESS; } const int axis_; std::vector<int> offset_; }; #ifdef MACE_ENABLE_OPENCL class OpenCLCropKernel { public: virtual MaceStatus Compute( OpKernelContext *context, const std::vector<const Tensor *> &input_list, Tensor *output, StatsFuture *future) = 0; MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLCropKernel); }; template <typename T> struct CropFunctor<DeviceType::GPU, T> : OpKernel { CropFunctor(OpKernelContext *context, const int axis, const std::vector<int> &offset); MaceStatus operator()(const std::vector<const Tensor *> &input_list, Tensor *output, StatsFuture *future); std::unique_ptr<OpenCLCropKernel> kernel_; }; #endif // MACE_ENABLE_OPENCL } // namespace kernels } // 
namespace mace #endif // MACE_KERNELS_CROP_H_
pr35625.c
/* PR libgomp/35625 */ /* { dg-do run } */ /* { dg-options "-std=c99" } */ int main (void) { #pragma omp parallel { #pragma omp for schedule (guided, 10) for (int i = 0; i < 1826; i += 10) ; #pragma omp for schedule (guided, 10) for (int i = 0; i > -1826; i -= 10) ; } return 0; }
axhelm.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

// Helmholtz operator Aq = (lambda0 * stiffness + lambda1 * mass) q on one
// scalar field, for spectral elements with p_Nq GLL points per direction.
// ggeo holds the p_Nggeo geometric factors per node, D the 1-D derivative
// matrix, lambda the two coefficient fields (lambda1 at offset), q the input
// field, Aq the output.
extern "C" void axhelm_v0(const dlong & Nelements,
                          const dlong & offset,
                          const dfloat* __restrict__ ggeo,
                          const dfloat* __restrict__ D,
                          const dfloat* __restrict__ lambda,
                          const dfloat* __restrict__ q,
                          dfloat* __restrict__ Aq )
{
  // Per-element scratch; private() below gives each OpenMP thread its own copy.
  dfloat s_q[p_Nq][p_Nq][p_Nq];
  dfloat s_Gqr[p_Nq][p_Nq][p_Nq];
  dfloat s_Gqs[p_Nq][p_Nq][p_Nq];
  dfloat s_Gqt[p_Nq][p_Nq][p_Nq];
  dfloat s_D[p_Nq][p_Nq];
  // Cache the derivative matrix once; it is shared (read-only) by all threads.
  for(int j = 0; j < p_Nq; ++j)
    for(int i = 0; i < p_Nq; ++i)
      s_D[j][i] = D[j * p_Nq + i];
  #pragma omp parallel for private(s_q, s_Gqr, s_Gqs, s_Gqt)
  for(dlong e = 0; e < Nelements; ++e) {
    const dlong element = e;
    // Load this element's field values into scratch.
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong base = i + j * p_Nq + k * p_Nq * p_Nq + element * p_Np;
          const dfloat qbase = q[base];
          s_q[k][j][i] = qbase;
        }
    // Differentiate q in r/s/t, scale by the metric terms and lambda0.
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong id = i + j * p_Nq + k * p_Nq * p_Nq + element * p_Np;
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_G00 = ggeo[gbase + p_G00ID * p_Np];
          const dfloat r_G01 = ggeo[gbase + p_G01ID * p_Np];
          const dfloat r_G11 = ggeo[gbase + p_G11ID * p_Np];
          const dfloat r_G12 = ggeo[gbase + p_G12ID * p_Np];
          const dfloat r_G02 = ggeo[gbase + p_G02ID * p_Np];
          const dfloat r_G22 = ggeo[gbase + p_G22ID * p_Np];
          const dfloat r_lam0 = lambda[id];
          dfloat qr = 0.f;
          dfloat qs = 0.f;
          dfloat qt = 0.f;
          for(int m = 0; m < p_Nq; m++) {
            qr += s_D[i][m] * s_q[k][j][m];
            qs += s_D[j][m] * s_q[k][m][i];
            qt += s_D[k][m] * s_q[m][j][i];
          }
          dfloat Gqr = r_G00 * qr + r_G01 * qs + r_G02 * qt;
          dfloat Gqs = r_G01 * qr + r_G11 * qs + r_G12 * qt;
          dfloat Gqt = r_G02 * qr + r_G12 * qs + r_G22 * qt;
          s_Gqr[k][j][i] = r_lam0 * Gqr;
          s_Gqs[k][j][i] = r_lam0 * Gqs;
          s_Gqt[k][j][i] = r_lam0 * Gqt;
        }
    // Apply D^T to the fluxes and add the lambda1-weighted mass term.
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_GwJ = ggeo[gbase + p_GWJID * p_Np];
          const dfloat r_lam1 = lambda[id + offset];
          const dfloat r_Aq = r_GwJ * r_lam1 * s_q[k][j][i];
          dfloat r_Aqr = 0, r_Aqs = 0, r_Aqt = 0;
          for(int m = 0; m < p_Nq; m++) {
            r_Aqr += s_D[m][i] * s_Gqr[k][j][m];
            r_Aqs += s_D[m][j] * s_Gqs[k][m][i];
            r_Aqt += s_D[m][k] * s_Gqt[m][j][i];
          }
          Aq[id] = r_Aqr + r_Aqs + r_Aqt + r_Aq;
        }
  }
}

// Same operator applied to three fields at once (e.g. velocity components);
// field c lives at q[... + c*offset] and Aq[... + c*offset].
extern "C" void axhelm_n3_v0(const dlong & Nelements,
                             const dlong & offset,
                             const dfloat* __restrict__ ggeo,
                             const dfloat* __restrict__ D,
                             const dfloat* __restrict__ lambda,
                             const dfloat* __restrict__ q,
                             dfloat* __restrict__ Aq )
{
  dfloat s_q[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqr[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqs[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqt[3][p_Nq][p_Nq][p_Nq];
  dfloat s_D[p_Nq][p_Nq];
  for(int j = 0; j < p_Nq; ++j)
    for(int i = 0; i < p_Nq; ++i)
      s_D[j][i] = D[j * p_Nq + i];
  #pragma omp parallel for private(s_q, s_Gqr, s_Gqs, s_Gqt)
  for(dlong e = 0; e < Nelements; ++e) {
    const dlong element = e;
    // Load all three components of this element.
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong base = i + j * p_Nq + k * p_Nq * p_Nq + element * p_Np;
          s_q[0][k][j][i] = q[base + 0 * offset];
          s_q[1][k][j][i] = q[base + 1 * offset];
          s_q[2][k][j][i] = q[base + 2 * offset];
        }
    // Stiffness fluxes for all three components, scaled by lambda0.
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_G00 = ggeo[gbase + p_G00ID * p_Np];
          const dfloat r_G01 = ggeo[gbase + p_G01ID * p_Np];
          const dfloat r_G11 = ggeo[gbase + p_G11ID * p_Np];
          const dfloat r_G12 = ggeo[gbase + p_G12ID * p_Np];
          const dfloat r_G02 = ggeo[gbase + p_G02ID * p_Np];
          const dfloat r_G22 = ggeo[gbase + p_G22ID * p_Np];
          const dfloat r_lam0 = lambda[id];
          dfloat qr0 = 0.f, qr1 = 0.f, qr2 = 0.f;
          dfloat qs0 = 0.f, qs1 = 0.f, qs2 = 0.f;
          dfloat qt0 = 0.f, qt1 = 0.f, qt2 = 0.f;
          for(int m = 0; m < p_Nq; m++) {
            qr0 += s_D[i][m] * s_q[0][k][j][m];
            qs0 += s_D[j][m] * s_q[0][k][m][i];
            qt0 += s_D[k][m] * s_q[0][m][j][i];
            //
            qr1 += s_D[i][m] * s_q[1][k][j][m];
            qs1 += s_D[j][m] * s_q[1][k][m][i];
            qt1 += s_D[k][m] * s_q[1][m][j][i];
            //
            qr2 += s_D[i][m] * s_q[2][k][j][m];
            qs2 += s_D[j][m] * s_q[2][k][m][i];
            qt2 += s_D[k][m] * s_q[2][m][j][i];
          }
          s_Gqr[0][k][j][i] = r_lam0 * (r_G00 * qr0 + r_G01 * qs0 + r_G02 * qt0);
          s_Gqs[0][k][j][i] = r_lam0 * (r_G01 * qr0 + r_G11 * qs0 + r_G12 * qt0);
          s_Gqt[0][k][j][i] = r_lam0 * (r_G02 * qr0 + r_G12 * qs0 + r_G22 * qt0);
          s_Gqr[1][k][j][i] = r_lam0 * (r_G00 * qr1 + r_G01 * qs1 + r_G02 * qt1);
          s_Gqs[1][k][j][i] = r_lam0 * (r_G01 * qr1 + r_G11 * qs1 + r_G12 * qt1);
          s_Gqt[1][k][j][i] = r_lam0 * (r_G02 * qr1 + r_G12 * qs1 + r_G22 * qt1);
          s_Gqr[2][k][j][i] = r_lam0 * (r_G00 * qr2 + r_G01 * qs2 + r_G02 * qt2);
          s_Gqs[2][k][j][i] = r_lam0 * (r_G01 * qr2 + r_G11 * qs2 + r_G12 * qt2);
          s_Gqt[2][k][j][i] = r_lam0 * (r_G02 * qr2 + r_G12 * qs2 + r_G22 * qt2);
        }
    // Gather fluxes and add the mass term.
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_GwJ = ggeo[gbase + p_GWJID * p_Np];
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          // NOTE(review): named r_lam0 but reads lambda[id + offset], i.e. the
          // lambda1 field (called r_lam1 in axhelm_v0) -- naming only, the
          // value used matches the scalar kernel.
          const dfloat r_lam0 = lambda[id + offset];
          dfloat r_Aq0 = r_GwJ * r_lam0 * s_q[0][k][j][i];
          dfloat r_Aq1 = r_GwJ * r_lam0 * s_q[1][k][j][i];
          dfloat r_Aq2 = r_GwJ * r_lam0 * s_q[2][k][j][i];
          dfloat r_Aqr0 = 0, r_Aqs0 = 0, r_Aqt0 = 0;
          dfloat r_Aqr1 = 0, r_Aqs1 = 0, r_Aqt1 = 0;
          dfloat r_Aqr2 = 0, r_Aqs2 = 0, r_Aqt2 = 0;
          for(int m = 0; m < p_Nq; m++) {
            r_Aqr0 += s_D[m][i] * s_Gqr[0][k][j][m];
            r_Aqr1 += s_D[m][i] * s_Gqr[1][k][j][m];
            r_Aqr2 += s_D[m][i] * s_Gqr[2][k][j][m];
            r_Aqs0 += s_D[m][j] * s_Gqs[0][k][m][i];
            r_Aqs1 += s_D[m][j] * s_Gqs[1][k][m][i];
            r_Aqs2 += s_D[m][j] * s_Gqs[2][k][m][i];
            r_Aqt0 += s_D[m][k] * s_Gqt[0][m][j][i];
            r_Aqt1 += s_D[m][k] * s_Gqt[1][m][j][i];
            r_Aqt2 += s_D[m][k] * s_Gqt[2][m][j][i];
          }
          Aq[id + 0 * offset] = r_Aqr0 + r_Aqs0 + r_Aqt0 + r_Aq0;
          Aq[id + 1 * offset] = r_Aqr1 + r_Aqs1 + r_Aqt1 + r_Aq1;
          Aq[id + 2 * offset] = r_Aqr2 + r_Aqs2 + r_Aqt2 + r_Aq2;
        }
  }
}

// "BK" (bake-off kernel) variant: pure stiffness operator, no lambda scaling
// and no mass term; the lambda argument is unused, kept for a uniform ABI.
extern "C" void axhelm_bk_v0(const dlong & Nelements,
                             const dlong & offset,
                             const dfloat* __restrict__ ggeo,
                             const dfloat* __restrict__ D,
                             const dfloat* __restrict__ lambda,
                             const dfloat* __restrict__ q,
                             dfloat* __restrict__ Aq )
{
  dfloat s_q[p_Nq][p_Nq][p_Nq];
  dfloat s_Gqr[p_Nq][p_Nq][p_Nq];
  dfloat s_Gqs[p_Nq][p_Nq][p_Nq];
  dfloat s_Gqt[p_Nq][p_Nq][p_Nq];
  dfloat s_D[p_Nq][p_Nq];
  for(int j = 0; j < p_Nq; ++j)
    for(int i = 0; i < p_Nq; ++i)
      s_D[j][i] = D[j * p_Nq + i];
  #pragma omp parallel for private(s_q, s_Gqr, s_Gqs, s_Gqt)
  for(dlong e = 0; e < Nelements; ++e) {
    const dlong element = e;
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong base = i + j * p_Nq + k * p_Nq * p_Nq + element * p_Np;
          const dfloat qbase = q[base];
          s_q[k][j][i] = qbase;
        }
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          // id is unused in this BK variant (no lambda read); left as-is.
          const dlong id = i + j * p_Nq + k * p_Nq * p_Nq + element * p_Np;
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_G00 = ggeo[gbase + p_G00ID * p_Np];
          const dfloat r_G01 = ggeo[gbase + p_G01ID * p_Np];
          const dfloat r_G11 = ggeo[gbase + p_G11ID * p_Np];
          const dfloat r_G12 = ggeo[gbase + p_G12ID * p_Np];
          const dfloat r_G02 = ggeo[gbase + p_G02ID * p_Np];
          const dfloat r_G22 = ggeo[gbase + p_G22ID * p_Np];
          dfloat qr = 0.f;
          dfloat qs = 0.f;
          dfloat qt = 0.f;
          for(int m = 0; m < p_Nq; m++) {
            qr += s_D[i][m] * s_q[k][j][m];
            qs += s_D[j][m] * s_q[k][m][i];
            qt += s_D[k][m] * s_q[m][j][i];
          }
          s_Gqr[k][j][i] = r_G00 * qr + r_G01 * qs + r_G02 * qt;
          s_Gqs[k][j][i] = r_G01 * qr + r_G11 * qs + r_G12 * qt;
          s_Gqt[k][j][i] = r_G02 * qr + r_G12 * qs + r_G22 * qt;
        }
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i)
        {
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          // r_GwJ is loaded but not used below: the BK variant omits the mass
          // term entirely; kept to mirror axhelm_v0's memory traffic.
          const dfloat r_GwJ = ggeo[gbase + p_GWJID * p_Np];
          dfloat r_Aqr = 0, r_Aqs = 0, r_Aqt = 0;
          for(int m = 0; m < p_Nq; m++) {
            r_Aqr += s_D[m][i] * s_Gqr[k][j][m];
            r_Aqs += s_D[m][j] * s_Gqs[k][m][i];
            r_Aqt += s_D[m][k] * s_Gqt[m][j][i];
          }
          Aq[id] = r_Aqr + r_Aqs + r_Aqt;
        }
  }
}

// BK variant for three components: stiffness only, no lambda, no mass term.
extern "C" void axhelm_bk_n3_v0(const dlong & Nelements,
                                const dlong & offset,
                                const dfloat* __restrict__ ggeo,
                                const dfloat* __restrict__ D,
                                const dfloat* __restrict__ lambda,
                                const dfloat* __restrict__ q,
                                dfloat* __restrict__ Aq )
{
  dfloat s_q[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqr[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqs[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqt[3][p_Nq][p_Nq][p_Nq];
  dfloat s_D[p_Nq][p_Nq];
  for(int j = 0; j < p_Nq; ++j)
    for(int i = 0; i < p_Nq; ++i)
      s_D[j][i] = D[j * p_Nq + i];
  #pragma omp parallel for private(s_q, s_Gqr, s_Gqs, s_Gqt)
  for(dlong e = 0; e < Nelements; ++e) {
    const dlong element = e;
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong base = i + j * p_Nq + k * p_Nq * p_Nq + element * p_Np;
          s_q[0][k][j][i] = q[base + 0 * offset];
          s_q[1][k][j][i] = q[base + 1 * offset];
          s_q[2][k][j][i] = q[base + 2 * offset];
        }
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          // id is unused in this BK variant (no lambda read); left as-is.
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_G00 = ggeo[gbase + p_G00ID * p_Np];
          const dfloat r_G01 = ggeo[gbase + p_G01ID * p_Np];
          const dfloat r_G11 = ggeo[gbase + p_G11ID * p_Np];
          const dfloat r_G12 = ggeo[gbase + p_G12ID * p_Np];
          const dfloat r_G02 = ggeo[gbase + p_G02ID * p_Np];
          const dfloat r_G22 = ggeo[gbase + p_G22ID * p_Np];
          dfloat qr0 = 0.f, qr1 = 0.f, qr2 = 0.f;
          dfloat qs0 = 0.f, qs1 = 0.f, qs2 = 0.f;
          dfloat qt0 = 0.f, qt1 = 0.f, qt2 = 0.f;
          for(int m = 0; m < p_Nq; m++) {
            qr0 += s_D[i][m] * s_q[0][k][j][m];
            qs0 += s_D[j][m] * s_q[0][k][m][i];
            qt0 += s_D[k][m] * s_q[0][m][j][i];
            //
            qr1 += s_D[i][m] * s_q[1][k][j][m];
            qs1 += s_D[j][m] * s_q[1][k][m][i];
            qt1 += s_D[k][m] * s_q[1][m][j][i];
            //
            qr2 += s_D[i][m] * s_q[2][k][j][m];
            qs2 += s_D[j][m] * s_q[2][k][m][i];
            qt2 += s_D[k][m] * s_q[2][m][j][i];
          }
          s_Gqr[0][k][j][i] = (r_G00 * qr0 + r_G01 * qs0 + r_G02 * qt0);
          s_Gqs[0][k][j][i] = (r_G01 * qr0 + r_G11 * qs0 + r_G12 * qt0);
          s_Gqt[0][k][j][i] = (r_G02 * qr0 + r_G12 * qs0 + r_G22 * qt0);
          s_Gqr[1][k][j][i] = (r_G00 * qr1 + r_G01 * qs1 + r_G02 * qt1);
          s_Gqs[1][k][j][i] = (r_G01 * qr1 + r_G11 * qs1 + r_G12 * qt1);
          s_Gqt[1][k][j][i] = (r_G02 * qr1 + r_G12 * qs1 + r_G22 * qt1);
          s_Gqr[2][k][j][i] = (r_G00 * qr2 + r_G01 * qs2 + r_G02 * qt2);
          s_Gqs[2][k][j][i] = (r_G01 * qr2 + r_G11 * qs2 + r_G12 * qt2);
          s_Gqt[2][k][j][i] = (r_G02 * qr2 + r_G12 * qs2 + r_G22 * qt2);
        }
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          // r_GwJ is unused below (no mass term in the BK variant).
          const dfloat r_GwJ = ggeo[gbase + p_GWJID * p_Np];
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          dfloat r_Aqr0 = 0, r_Aqs0 = 0, r_Aqt0 = 0;
          dfloat r_Aqr1 = 0, r_Aqs1 = 0, r_Aqt1 = 0;
          dfloat r_Aqr2 = 0, r_Aqs2 = 0, r_Aqt2 = 0;
          for(int m = 0; m < p_Nq; m++) {
            r_Aqr0 += s_D[m][i] * s_Gqr[0][k][j][m];
            r_Aqr1 += s_D[m][i] * s_Gqr[1][k][j][m];
            r_Aqr2 += s_D[m][i] * s_Gqr[2][k][j][m];
            r_Aqs0 += s_D[m][j] * s_Gqs[0][k][m][i];
            r_Aqs1 += s_D[m][j] * s_Gqs[1][k][m][i];
            r_Aqs2 += s_D[m][j] * s_Gqs[2][k][m][i];
            r_Aqt0 += s_D[m][k] * s_Gqt[0][m][j][i];
            r_Aqt1 += s_D[m][k] * s_Gqt[1][m][j][i];
            r_Aqt2 += s_D[m][k] * s_Gqt[2][m][j][i];
          }
          Aq[id + 0 * offset] = r_Aqr0 + r_Aqs0 + r_Aqt0;
          Aq[id + 1 * offset] = r_Aqr1 + r_Aqs1 + r_Aqt1;
          Aq[id + 2 * offset] = r_Aqr2 + r_Aqs2 + r_Aqt2;
        }
  }
}
pr88203-2.c
/* PR c++/88203 */ /* { dg-do compile } */ /* { dg-additional-options "-std=gnu99" { target c } } */ /* { dg-additional-options "-std=gnu++11" { target c++ } } */ void foo (const char *, const char *); #pragma omp declare target to (foo) void f1 (void) { #pragma omp parallel default(none) foo (__FUNCTION__, __PRETTY_FUNCTION__); } void f2 (void) { #pragma omp parallel default(none) shared(__FUNCTION__, __PRETTY_FUNCTION__) foo (__FUNCTION__, __PRETTY_FUNCTION__); #pragma omp parallel default(none) shared(__FUNCTION__) firstprivate(__PRETTY_FUNCTION__) foo (__FUNCTION__, __PRETTY_FUNCTION__); } void f3 (void) { #pragma omp parallel default(none) firstprivate(__FUNCTION__, __PRETTY_FUNCTION__) foo (__FUNCTION__, __PRETTY_FUNCTION__); #pragma omp parallel default(none) firstprivate(__FUNCTION__), shared(__PRETTY_FUNCTION__) foo (__FUNCTION__, __PRETTY_FUNCTION__); } void f4 (void) { foo (__FUNCTION__, __PRETTY_FUNCTION__); #pragma omp parallel default(none) foo (__FUNCTION__, __PRETTY_FUNCTION__); } void f5 (void) { foo (__FUNCTION__, __PRETTY_FUNCTION__); #pragma omp parallel default(none) shared(__FUNCTION__, __PRETTY_FUNCTION__) foo (__FUNCTION__, __PRETTY_FUNCTION__); } void f6 (void) { foo (__FUNCTION__, __PRETTY_FUNCTION__); #pragma omp parallel default(none) firstprivate(__FUNCTION__, __PRETTY_FUNCTION__) foo (__FUNCTION__, __PRETTY_FUNCTION__); } void f7 (void) { #pragma omp target map(to: __FUNCTION__, __PRETTY_FUNCTION__) foo (__FUNCTION__, __PRETTY_FUNCTION__); #pragma omp task depend(inout:__FUNCTION__, __PRETTY_FUNCTION__) foo (__FUNCTION__, __PRETTY_FUNCTION__); }
irbuilder_nested_parallel_for.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefixes=CHECK %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -verify %s -emit-llvm -o - | FileCheck --check-prefixes=CHECK-DEBUG %s
// expected-no-diagnostics
// TODO: Teach the update script to check new functions too.

// Purpose: exercise the OpenMPIRBuilder path (-fopenmp-enable-irbuilder) for
// nested "parallel" regions containing worksharing "for" loops, with and
// without debug info.  The CHECK/CHECK-DEBUG lines below are autogenerated;
// do not hand-edit them — rerun update_cc_test_checks.py instead.

#ifndef HEADER
#define HEADER

// CHECK-LABEL: @_Z14parallel_for_0v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1:@.*]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*))
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK: omp.par.outlined.exit:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_0v(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1:@.*]]), [[DBG12:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*)), [[DBG13:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: ret void, [[DBG17:!dbg !.*]]
//
// Base case: one parallel region containing one worksharing loop with an
// empty body and no captured variables (hence the i32 0 argument count in
// the __kmpc_fork_call above).
void parallel_for_0(void) {
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < 100; ++i) {
    }
  }
}

// CHECK-LABEL: @_Z14parallel_for_1Pfid(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_1Pfid..omp_par.4 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]])
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT16:%.*]]
// CHECK: omp.par.outlined.exit16:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_1Pfid(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], [[META72:metadata !.*]], metadata !DIExpression()), [[DBG73:!dbg !.*]]
// CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], [[META74:metadata !.*]], metadata !DIExpression()), [[DBG75:!dbg !.*]]
// CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], [[META76:metadata !.*]], metadata !DIExpression()), [[DBG77:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB6:@.*]]), [[DBG78:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB6]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_1Pfid..omp_par.4 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]]), [[DBG79:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT16:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit16:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: ret void, [[DBG81:!dbg !.*]]
//
// Nested case: parallel inside parallel; three variables (a, b, r) are
// captured by the outermost region (i32 3 in the __kmpc_fork_call above).
void parallel_for_1(float *r, int a, double b) {
#pragma omp parallel
  {
#pragma omp parallel
    {
#pragma omp for
      for (int i = 0; i < 100; ++i) {
        *r = a + b;
      }
    }
  }
}

// CHECK-LABEL: @_Z14parallel_for_2Pfid(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: [[I185:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[AGG_CAPTURED186:%.*]] = alloca [[STRUCT_ANON_17:%.*]], align 8
// CHECK-NEXT: [[AGG_CAPTURED187:%.*]] = alloca [[STRUCT_ANON_18:%.*]], align 4
// CHECK-NEXT: [[DOTCOUNT_ADDR188:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LASTITER203:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LOWERBOUND204:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_UPPERBOUND205:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_STRIDE206:%.*]] = alloca i32, align 4
// CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_2Pfid..omp_par.23 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]])
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT184:%.*]]
// CHECK: omp.par.outlined.exit184:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: store i32 0, i32* [[I185]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_17]], %struct.anon.17* [[AGG_CAPTURED186]], i32 0, i32 0
// CHECK-NEXT: store i32* [[I185]], i32** [[TMP0]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_18]], %struct.anon.18* [[AGG_CAPTURED187]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[I185]], align 4
// CHECK-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt.19(i32* [[DOTCOUNT_ADDR188]], %struct.anon.17* [[AGG_CAPTURED186]])
// CHECK-NEXT: [[DOTCOUNT189:%.*]] = load i32, i32* [[DOTCOUNT_ADDR188]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_PREHEADER190:%.*]]
// CHECK: omp_loop.preheader190:
// CHECK-NEXT: store i32 0, i32* [[P_LOWERBOUND204]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT189]], 1
// CHECK-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND205]], align 4
// CHECK-NEXT: store i32 1, i32* [[P_STRIDE206]], align 4
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM207:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1]])
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* [[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM207]], i32 34, i32* [[P_LASTITER203]], i32* [[P_LOWERBOUND204]], i32* [[P_UPPERBOUND205]], i32* [[P_STRIDE206]], i32 1, i32 1)
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND204]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND205]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]]
// CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER191:%.*]]
// CHECK: omp_loop.header191:
// CHECK-NEXT: [[OMP_LOOP_IV197:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER190]] ], [ [[OMP_LOOP_NEXT199:%.*]], [[OMP_LOOP_INC194:%.*]] ]
// CHECK-NEXT: br label [[OMP_LOOP_COND192:%.*]]
// CHECK: omp_loop.cond192:
// CHECK-NEXT: [[OMP_LOOP_CMP198:%.*]] = icmp ult i32 [[OMP_LOOP_IV197]], [[TMP7]]
// CHECK-NEXT: br i1 [[OMP_LOOP_CMP198]], label [[OMP_LOOP_BODY193:%.*]], label [[OMP_LOOP_EXIT195:%.*]]
// CHECK: omp_loop.body193:
// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV197]], [[TMP4]]
// CHECK-NEXT: call void @__captured_stmt.20(i32* [[I185]], i32 [[TMP8]], %struct.anon.18* [[AGG_CAPTURED187]])
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CONV200:%.*]] = sitofp i32 [[TMP9]] to double
// CHECK-NEXT: [[TMP10:%.*]] = load double, double* [[B_ADDR]], align 8
// CHECK-NEXT: [[ADD201:%.*]] = fadd double [[CONV200]], [[TMP10]]
// CHECK-NEXT: [[CONV202:%.*]] = fptrunc double [[ADD201]] to float
// CHECK-NEXT: [[TMP11:%.*]] = load float*, float** [[R_ADDR]], align 8
// CHECK-NEXT: store float [[CONV202]], float* [[TMP11]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_INC194]]
// CHECK: omp_loop.inc194:
// CHECK-NEXT: [[OMP_LOOP_NEXT199]] = add nuw i32 [[OMP_LOOP_IV197]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER191]]
// CHECK: omp_loop.exit195:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* [[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM207]])
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM208:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1]])
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* [[GLOB2:@.*]], i32 [[OMP_GLOBAL_THREAD_NUM208]])
// CHECK-NEXT: br label [[OMP_LOOP_AFTER196:%.*]]
// CHECK: omp_loop.after196:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_2Pfid(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-DEBUG-NEXT: [[I185:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[AGG_CAPTURED186:%.*]] = alloca [[STRUCT_ANON_17:%.*]], align 8
// CHECK-DEBUG-NEXT: [[AGG_CAPTURED187:%.*]] = alloca [[STRUCT_ANON_18:%.*]], align 4
// CHECK-DEBUG-NEXT: [[DOTCOUNT_ADDR188:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_LASTITER203:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_LOWERBOUND204:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_UPPERBOUND205:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_STRIDE206:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], [[META133:metadata !.*]], metadata !DIExpression()), [[DBG134:!dbg !.*]]
// CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], [[META135:metadata !.*]], metadata !DIExpression()), [[DBG136:!dbg !.*]]
// CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], [[META137:metadata !.*]], metadata !DIExpression()), [[DBG138:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB13:@.*]]), [[DBG139:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB13]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_2Pfid..omp_par.23 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]]), [[DBG140:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT184:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit184:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[I185]], [[META144:metadata !.*]], metadata !DIExpression()), [[DBG147:!dbg !.*]]
// CHECK-DEBUG-NEXT: store i32 0, i32* [[I185]], align 4, [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_17]], %struct.anon.17* [[AGG_CAPTURED186]], i32 0, i32 0, [[DBG148:!dbg !.*]]
// CHECK-DEBUG-NEXT: store i32* [[I185]], i32** [[TMP0]], align 8, [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_18]], %struct.anon.18* [[AGG_CAPTURED187]], i32 0, i32 0, [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP2:%.*]] = load i32, i32* [[I185]], align 4, [[DBG149:!dbg !.*]]
// CHECK-DEBUG-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4, [[DBG148]]
// CHECK-DEBUG-NEXT: call void @__captured_stmt.19(i32* [[DOTCOUNT_ADDR188]], %struct.anon.17* [[AGG_CAPTURED186]]), [[DBG148]]
// CHECK-DEBUG-NEXT: [[DOTCOUNT189:%.*]] = load i32, i32* [[DOTCOUNT_ADDR188]], align 4, [[DBG148]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_PREHEADER190:%.*]], [[DBG148]]
// CHECK-DEBUG: omp_loop.preheader190:
// CHECK-DEBUG-NEXT: store i32 0, i32* [[P_LOWERBOUND204]], align 4, [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT189]], 1, [[DBG148]]
// CHECK-DEBUG-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND205]], align 4, [[DBG148]]
// CHECK-DEBUG-NEXT: store i32 1, i32* [[P_STRIDE206]], align 4, [[DBG148]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM207:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB42:@.*]]), [[DBG148]]
// CHECK-DEBUG-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* [[GLOB42]], i32 [[OMP_GLOBAL_THREAD_NUM207]], i32 34, i32* [[P_LASTITER203]], i32* [[P_LOWERBOUND204]], i32* [[P_UPPERBOUND205]], i32* [[P_STRIDE206]], i32 1, i32 1), [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND204]], align 4, [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND205]], align 4, [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]], [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1, [[DBG148]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_HEADER191:%.*]], [[DBG148]]
// CHECK-DEBUG: omp_loop.header191:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_IV197:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER190]] ], [ [[OMP_LOOP_NEXT199:%.*]], [[OMP_LOOP_INC194:%.*]] ], [[DBG148]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_COND192:%.*]], [[DBG148]]
// CHECK-DEBUG: omp_loop.cond192:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_CMP198:%.*]] = icmp ult i32 [[OMP_LOOP_IV197]], [[TMP7]], [[DBG148]]
// CHECK-DEBUG-NEXT: br i1 [[OMP_LOOP_CMP198]], label [[OMP_LOOP_BODY193:%.*]], label [[OMP_LOOP_EXIT195:%.*]], [[DBG148]]
// CHECK-DEBUG: omp_loop.body193:
// CHECK-DEBUG-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV197]], [[TMP4]], [[DBG148]]
// CHECK-DEBUG-NEXT: call void @__captured_stmt.20(i32* [[I185]], i32 [[TMP8]], %struct.anon.18* [[AGG_CAPTURED187]]), [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, [[DBG150:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[CONV200:%.*]] = sitofp i32 [[TMP9]] to double, [[DBG150]]
// CHECK-DEBUG-NEXT: [[TMP10:%.*]] = load double, double* [[B_ADDR]], align 8, [[DBG151:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[ADD201:%.*]] = fadd double [[CONV200]], [[TMP10]], [[DBG152:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[CONV202:%.*]] = fptrunc double [[ADD201]] to float, [[DBG150]]
// CHECK-DEBUG-NEXT: [[TMP11:%.*]] = load float*, float** [[R_ADDR]], align 8, [[DBG153:!dbg !.*]]
// CHECK-DEBUG-NEXT: store float [[CONV202]], float* [[TMP11]], align 4, [[DBG154:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_INC194]], [[DBG148]]
// CHECK-DEBUG: omp_loop.inc194:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_NEXT199]] = add nuw i32 [[OMP_LOOP_IV197]], 1, [[DBG148]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_HEADER191]], [[DBG148]]
// CHECK-DEBUG: omp_loop.exit195:
// CHECK-DEBUG-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* [[GLOB42]], i32 [[OMP_GLOBAL_THREAD_NUM207]]), [[DBG148]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM208:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB42]]), [[DBG151]]
// CHECK-DEBUG-NEXT: call void @__kmpc_barrier(%struct.ident_t* [[GLOB43:@.*]], i32 [[OMP_GLOBAL_THREAD_NUM208]]), [[DBG151]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_AFTER196:%.*]], [[DBG148]]
// CHECK-DEBUG: omp_loop.after196:
// CHECK-DEBUG-NEXT: ret void, [[DBG155:!dbg !.*]]
//
// Stress case: deeply nested parallel regions with worksharing loops at every
// nesting level, plus a loop after the outermost region (that trailing loop is
// the one checked inline above; the outlined bodies are in ..omp_par.23).
void parallel_for_2(float *r, int a, double b) {
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < 100; ++i)
      *r = a + b;
#pragma omp parallel
    {
#pragma omp for
      for (int i = 0; i < 100; ++i)
        *r = a + b;
#pragma omp parallel
      {
#pragma omp for
        for (int i = 0; i < 100; ++i)
          *r = a + b;
      }
#pragma omp for
      for (int i = 0; i < 100; ++i)
        *r = a + b;
#pragma omp parallel
      {
#pragma omp for
        for (int i = 0; i < 100; ++i)
          *r = a + b;
      }
#pragma omp for
      for (int i = 0; i < 100; ++i)
        *r = a + b;
    }
#pragma omp for
    for (int i = 0; i < 100; ++i)
      *r = a + b;
  }
#pragma omp for
  for (int i = 0; i < 100; ++i)
    *r = a + b;
}

#endif
sgm.h
#ifndef RECONSTRUCTION_BASE_SEMI_GLOBAL_MATCHING_ #define RECONSTRUCTION_BASE_SEMI_GLOBAL_MATCHING_ #include <iostream> #include <fstream> #include <cassert> #include <Eigen/Core> #include <Eigen/StdVector> #include <opencv2/core/core.hpp> //#define COST_CENSUS //#define COST_ZSAD #define COST_CNN namespace recon { struct SGMParams { int disp_range; int window_sz; int penalty1; int penalty2; }; #ifdef COST_SAD // SAD typedef uint16_t CostType; typedef uint32_t ACostType; #endif #ifdef COST_ZSAD // ZSAD typedef float CostType; // for ZSAD typedef float ACostType; // accumulated cost type #endif #ifdef COST_CENSUS // Census typedef uint8_t CostType; // for 1x1 SAD, 5x5 Census typedef uint32_t ACostType; // accumulated cost type, Census #endif #ifdef COST_CNN typedef float CostType; // for 1x1 SAD, 5x5 Census typedef float ACostType; // accumulated cost type, Census typedef std::vector<std::vector<Eigen::VectorXf, Eigen::aligned_allocator<Eigen::VectorXf>>> DescriptorTensor; #endif //typedef uint8_t ACostType; // accumulated cost type - byte for Census? 
// if we want to use raw arrays //typedef CostType* CostArray1D; //typedef CostType*** CostArray3D; //typedef ACostType* ACostArray1D; //typedef ACostType*** ACostArray3D; typedef std::vector<CostType> CostArray1D; typedef std::vector<ACostType> ACostArray1D; typedef std::vector<std::vector<CostArray1D>> CostArray; typedef std::vector<std::vector<ACostArray1D>> ACostArray; class SGM { public: SGM(SGMParams& params) : params_(params) {} cv::Mat Compute(const std::string left_desc_path, const std::string right_desc_path); protected: void aggregate_costs(const CostArray& costs, const int dir_x, const int dir_y, ACostArray& aggr_costs); void sum_costs(const ACostArray& costs1, ACostArray& costs2); void LoadRepresentationFromFile(const std::string& desciptors_path, DescriptorTensor* descriptors); template<typename T1, typename T2> void copy_vector(const std::vector<T1>& vec1, std::vector<T2>& vec2); template<typename T1, typename T2> void sum_vectors(const std::vector<T1>& vec1, std::vector<T2>& vec2); template<typename T> T get_min(const std::vector<T>& vec); int FindMinDisp(const std::vector<CostType>& costs); int find_min_disp(const std::vector<ACostType>& costs); int find_min_disp_right(const std::vector<std::vector<ACostType>>& costs, int x); void init_costs(ACostType init_val, ACostArray& costs); cv::Mat GetDisparityImage(const CostArray& costs, int msz); cv::Mat get_disparity_matrix_float(const ACostArray& costs, int msz); cv::Mat get_disparity_image_uint16(const ACostArray& costs, int msz); cv::Mat get_disparity_image(const ACostArray& costs, int msz); void aggregate_path(const std::vector<ACostType>& prior, const std::vector<CostType>& local, std::vector<ACostType>& costs); //inline getCensusCost(); SGMParams params_; }; template<typename T1, typename T2> inline void SGM::sum_vectors(const std::vector<T1>& vec1, std::vector<T2>& vec2) { assert(vec1.size() == vec2.size()); for(size_t i = 0; i < vec2.size(); i++) vec2[i] += (T2)vec1[i]; } template<typename T1, 
typename T2> inline void SGM::copy_vector(const std::vector<T1>& vec1, std::vector<T2>& vec2) { assert(vec1.size() == vec2.size()); for(size_t i = 0; i < vec1.size(); i++) { vec2[i] = (T2)vec1[i]; //std::cout << "d = " << i << " - " << (T2)vec1[i] << " == " << vec2[i] << "\n"; } } inline void SGM::sum_costs(const ACostArray& costs1, ACostArray& costs2) { size_t height = costs1.size(); size_t width = costs1[0].size(); assert(costs1.size() == costs2.size()); for(size_t y = 0; y < height; y++) { assert(costs1[y].size() == costs2[y].size()); for(size_t x = 0; x < width; x++) { sum_vectors<ACostType,ACostType>(costs1[y][x], costs2[y][x]); } } } inline void SGM::aggregate_path(const std::vector<ACostType>& prior, const std::vector<CostType>& local, std::vector<ACostType>& costs) { assert(params_.disp_range == costs.size()); int P1 = params_.penalty1; int P2 = params_.penalty2; copy_vector<CostType,ACostType>(local, costs); int max_disp = params_.disp_range; ACostType min_prior = get_min<ACostType>(prior); // decrease the P2 error if the gradient is big which is a clue for the discontinuites // TODO: works very bad on KITTI... //P2 = std::max(P1, gradient ? 
(int)std::round(static_cast<float>(P2/gradient)) : P2); for(int d = 0; d < max_disp; d++) { ACostType error = min_prior + P2; error = std::min(error, prior[d]); if(d > 0) error = std::min(error, prior[d-1] + P1); if(d < (max_disp - 1)) error = std::min(error, prior[d+1] + P1); // ACostType can be uint8_t and e_smooth int // Normalize by subtracting min of prior cost // Now we have upper limit on cost: e_smooth <= C_max + P2 // LR check won't work without this normalization also costs[d] += (error - min_prior); } } //inline //std::vector<ACostType> SGM::aggregate_path(const std::vector<ACostType>& prior, // const std::vector<CostType>& local) //{ // int P1 = params_.penalty1; // int P2 = params_.penalty2; // std::vector<ACostType> curr_cost; // copy_vector<CostType,ACostType>(local, curr_cost); // for(int d = 0; d < params_.disp_range; d++) { // //int e_smooth = std::numeric_limits<int>::max(); // ACostType e_smooth = std::numeric_limits<ACostType>::max(); // for(int d_p = 0; d_p < params_.disp_range; d_p++) { // if(d_p - d == 0) { // // No penality // e_smooth = std::min(e_smooth, prior[d_p]); // } // else if(std::abs(d_p - d) == 1) { // // Small penality // e_smooth = std::min(e_smooth, prior[d_p] + P1); // } else { // // Large penality // //e_smooth = std::min(e_smooth, prior[d_p] + std::max(P1, path_gradient ? 
P2/path_gradient : P2)); // e_smooth = std::min(e_smooth, prior[d_p] + P2); // } // } // curr_cost[d] += e_smooth; // } // // // TODO: why // // Normalize by subtracting min of prior cost // //ACostType min = get_min<ACostType>(prior); // //for(size_t i = 0; i < curr_cost.size(); i++) // // curr_cost[i] -= min; // return curr_cost; //} inline void SGM::init_costs(ACostType init_val, ACostArray& costs) { size_t disp_range = costs[0][0].size(); size_t width = costs[0].size(); size_t height = costs.size(); for(size_t i = 0; i < height; i++) for(size_t j = 0; j < width; j++) for(size_t k = 0; k < disp_range; k++) costs[i][j][k] = init_val; } template<typename T> inline T SGM::get_min(const std::vector<T>& vec) { T min = vec[0]; for(size_t i = 1; i < vec.size(); i++) { if(vec[i] < min) min = vec[i]; } return min; } inline int SGM::find_min_disp(const std::vector<ACostType>& costs) { int d = 0; for(size_t i = 1; i < costs.size(); i++) { if(costs[i] < costs[d]) d = i; } return d; } inline int SGM::FindMinDisp(const std::vector<CostType>& costs) { int d = 0; for(size_t i = 1; i < costs.size(); i++) { if(costs[i] < costs[d]) d = i; } return d; } inline int SGM::find_min_disp_right(const std::vector<std::vector<ACostType>>& costs, int x) { int d = 0; //ACostType min_cost = costs[x+d][d]; int width = costs.size(); int max_disp = std::min(params_.disp_range, (width - x)); for(int i = 1; i < max_disp; i++) { if(costs[x+i][i] < costs[x+d][d]) d = i; } return d; } inline cv::Mat SGM::GetDisparityImage(const CostArray& costs, int msz) { int height = costs.size(); int width = costs[0].size(); cv::Mat img = cv::Mat::zeros(height + 2*msz, width + 2*msz, CV_8U); for(int y = 0; y < height; y++) { for(int x = 0; x < width; x++) { int d = FindMinDisp(costs[y][x]); //img.at<uint8_t>(y,x) = 4 * d; img.at<uint8_t>(msz+y, msz+x) = d; } } return img; } inline cv::Mat SGM::get_disparity_image(const ACostArray& costs, int msz) { int height = costs.size(); int width = costs[0].size(); cv::Mat 
img = cv::Mat::zeros(height + 2*msz, width + 2*msz, CV_8U); for(int y = 0; y < height; y++) { for(int x = 0; x < width; x++) { int d = find_min_disp(costs[y][x]); //img.at<uint8_t>(y,x) = 4 * d; img.at<uint8_t>(msz+y, msz+x) = d; } } return img; } inline cv::Mat SGM::get_disparity_image_uint16(const ACostArray& costs, int msz) { int height = costs.size(); int width = costs[0].size(); //cv::Mat img = cv::Mat::zeros(height + 2*msz, width + 2*msz, CV_16U); cv::Mat img = cv::Mat::zeros(height, width, CV_16U); for(int y = 0; y < height; y++) { for(int x = 0; x < width; x++) { // find minimum cost disparity int d = find_min_disp(costs[y][x]); // TODO: do the fast LR check if((x-d) >= 0) { int d_right = find_min_disp_right(costs[y], x-d); //std::cout << "d = " << d << " , " << " d_r = " << d_right << "\n"; if(std::abs(d - d_right) > 2) { //img.at<uint16_t>(msz+y, msz+x) = 0; img.at<uint16_t>(y,x) = 0; continue; } } else { //img.at<uint16_t>(msz+y, msz+x) = 0; img.at<uint16_t>(y,x) = 0; continue; } // perform equiangular subpixel interpolation if(d >= 1 && d < (params_.disp_range-1)) { float C_left = costs[y][x][d-1]; float C_center = costs[y][x][d]; float C_right = costs[y][x][d+1]; float d_s = 0; if(C_right < C_left) d_s = 0.5f * (C_right - C_left) / (C_center - C_left); else d_s = 0.5f * (C_right - C_left) / (C_center - C_right); //std::cout << d << " -- " << d+d_s << "\n"; //img.at<uint16_t>(msz+y, msz+x) = static_cast<uint16_t>(std::round(256.0 * (d + d_s))); img.at<uint16_t>(y,x) = static_cast<uint16_t>(std::round(256.0 * (d + d_s))); } else { //img.at<uint16_t>(msz+y, msz+x) = static_cast<uint16_t>(std::round(256.0 * d)); img.at<uint16_t>(y,x) = static_cast<uint16_t>(std::round(256.0 * d)); } } } return img; } inline cv::Mat SGM::get_disparity_matrix_float(const ACostArray& costs, int msz) { int height = costs.size(); int width = costs[0].size(); cv::Mat img = cv::Mat::zeros(height + 2*msz, width + 2*msz, CV_32F); //#pragma omp parallel for for(int y = 0; y < 
height; y++) { for(int x = 0; x < width; x++) { // find minimum cost disparity int d = find_min_disp(costs[y][x]); // TODO: do the fast LR check if((x-d) >= 0) { int d_right = find_min_disp_right(costs[y], x-d); //std::cout << "d = " << d << " , " << " d_r = " << d_right << "\n"; if(std::abs(d - d_right) > 2) { img.at<float>(msz+y, msz+x) = -1.0f; continue; } } else { img.at<float>(msz+y, msz+x) = -1.0f; continue; } // perform equiangular subpixel interpolation if(d >= 1 && d < (params_.disp_range-1)) { float C_left = costs[y][x][d-1]; float C_center = costs[y][x][d]; float C_right = costs[y][x][d+1]; float d_s = 0; if(C_right < C_left) d_s = 0.5f * (C_right - C_left) / (C_center - C_left); else d_s = 0.5f * (C_right - C_left) / (C_center - C_right); //std::cout << d << " -- " << d+d_s << "\n"; img.at<float>(msz+y, msz+x) = static_cast<float>(d + d_s); } else img.at<float>(msz+y, msz+x) = static_cast<float>(d); } } return img; } inline void SGM::LoadRepresentationFromFile(const std::string& descriptors_path, DescriptorTensor* descriptors) { std::ifstream file(descriptors_path, std::ios::binary); //for (int i = 0; i < 100; i++) { // //uint64_t dims; // int dims; // file.read(reinterpret_cast<char*>(&dims), sizeof(dims)); // std::cout << dims << ", "; //} int dims; file.read(reinterpret_cast<char*>(&dims), sizeof(dims)); if (dims != 3) throw 1; std::vector<uint64_t> size; size.assign(dims, 0); for (int i = 0; i < dims; i++) { //file >> size[i]; file.read(reinterpret_cast<char*>(&size[i]), sizeof(size[i])); } descriptors->resize(size[0]); for (uint64_t i = 0; i < size[0]; i++) { //(*descriptors)[i].resize(size[1]); (*descriptors)[i].assign(size[1], Eigen::VectorXf(size[2])); for (uint64_t j = 0; j < size[1]; j++) { for (uint64_t k = 0; k < size[2]; k++) { file.read(reinterpret_cast<char*>(&(*descriptors)[i][j][k]), sizeof((*descriptors)[i][j][k])); //file.read(reinterpret_cast<char*>(&(*descriptors)[i][j][k]), sizeof(float)); //std::cout << (*descriptors)[i][j][k] << 
"\n"; } } } } } #endif
GB_bitmap_select_template.c
//------------------------------------------------------------------------------ // GB_bitmap_select_template: C=select(A,thunk) if A is bitmap or full //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Ab and Cb can be aliased, if A is bitmap and the selection is done in-place. // Ax and Cx are not aliased. // TODO: If done in-place, Cx can be passed as NULL. Then if A is not bitmap, // C->b needs to be allocated, but not C->x. // the following macro is awkward but currently needed for the user_select op: #undef GBI #define GBI(Ai,p,avlen) i { int8_t *Ab = A->b ; GB_ATYPE *GB_RESTRICT Ax = A->x ; const int64_t avlen = A->vlen ; const int64_t avdim = A->vdim ; const size_t asize = A->type->size ; const int64_t anz = avlen * avdim ; int64_t pA, cnvals = 0 ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:cnvals) for (pA = 0 ; pA < anz ; pA++) { int64_t i = pA % avlen ; int64_t j = pA / avlen ; #if defined ( GB_ENTRY_SELECTOR ) // test the existence and value of A(i,j) int8_t cb = GBB (Ab, pA) && GB_TEST_VALUE_OF_ENTRY (pA) ; #else // test the existence and position of A(i,j) #if defined ( GB_TRIL_SELECTOR ) int8_t cb = GBB (Ab, pA) && (j-i <= ithunk) ; #elif defined ( GB_TRIU_SELECTOR ) int8_t cb = GBB (Ab, pA) && (j-i >= ithunk) ; #elif defined ( GB_DIAG_SELECTOR ) int8_t cb = GBB (Ab, pA) && (j-i == ithunk) ; #elif defined ( GB_OFFDIAG_SELECTOR ) int8_t cb = GBB (Ab, pA) && (j-i != ithunk) ; #else ASSERT (GB_DEAD_CODE) ; #endif #endif Cb [pA] = cb ; cnvals += cb ; // if (Cx != NULL) { // Cx [pA] = Ax [pA] GB_SELECT_ENTRY (Cx, pA, Ax, pA) ; } } (*cnvals_handle)= cnvals ; }